From 65be7c880d468ebd12fe4cc5b664bae7638f7ccd Mon Sep 17 00:00:00 2001
From: xuqibin <935159042@qq.com>
Date: Fri, 5 Jan 2024 11:41:39 +0800
Subject: [PATCH] xuqibin-yangfan

---
 drivers/Makefile | 2 +
 drivers/block/nbd.c | 6 -
 drivers/char/Makefile | 2 +
 drivers/clk/Kconfig | 7 +
 drivers/clk/clk.c | 2 +-
 drivers/clk/rockchip/Kconfig | 42 +-
 drivers/clk/rockchip/Makefile | 2 +
 drivers/clk/rockchip/clk-cpu.c | 92 +-
 drivers/clk/rockchip/clk-ddr.c | 171 +-
 drivers/clk/rockchip/clk-half-divider.c | 35 +-
 drivers/clk/rockchip/clk-pll.c | 779 ++-
 drivers/clk/rockchip/clk-rk3399.c | 589 +-
 drivers/clk/rockchip/clk.c | 200 +-
 drivers/clk/rockchip/clk.h | 358 +-
 drivers/clocksource/Kconfig | 4 +-
 drivers/clocksource/timer-rockchip.c | 33 +
 drivers/cpufreq/Kconfig.arm | 10 +
 drivers/cpufreq/Makefile | 3 +-
 drivers/cpufreq/cpufreq-dt-platdev.c | 37 +-
 drivers/cpufreq/cpufreq-dt.c | 168 +-
 drivers/cpufreq/cpufreq.c | 7 +-
 drivers/cpufreq/cpufreq_userspace.c | 6 +-
 drivers/cpuidle/driver.c | 1 +
 drivers/cpuidle/governor.c | 2 +
 drivers/devfreq/Kconfig | 13 +-
 drivers/devfreq/Makefile | 5 +-
 drivers/devfreq/devfreq.c | 35 +
 drivers/devfreq/event/Kconfig | 7 +
 drivers/devfreq/event/Makefile | 1 +
 drivers/devfreq/event/rockchip-dfi.c | 563 +-
 drivers/dma-buf/Kconfig | 3 +-
 drivers/dma-buf/dma-buf-sysfs-stats.h | 27 +
 drivers/dma-buf/dma-buf.c | 144 +-
 drivers/dma-buf/dma-fence.c | 70 +-
 drivers/dma-buf/dma-heap.c | 223 +-
 drivers/dma-buf/heaps/Kconfig | 16 +-
 drivers/dma-buf/heaps/Makefile | 3 +-
 drivers/dma-buf/heaps/cma_heap.c | 336 +-
 drivers/dma-buf/heaps/system_heap.c | 575 +-
 drivers/dma-buf/sw_sync.c | 12 +
 drivers/dma-buf/sync_debug.c | 2 +
 drivers/dma-buf/sync_debug.h | 7 +
 drivers/firmware/Kconfig | 9 +-
 drivers/firmware/Makefile | 1 +
 drivers/gpio/Kconfig | 8 +
 drivers/gpio/Makefile | 1 +
 drivers/gpio/gpiolib-of.c | 11 +
 drivers/gpio/gpiolib-of.h | 5 +
 drivers/gpu/drm/Kconfig | 24 +-
 drivers/gpu/drm/Makefile | 11 +-
 drivers/gpu/drm/amd/display/Kconfig | 2 -
 drivers/gpu/drm/bridge/Kconfig | 16 +
 drivers/gpu/drm/bridge/Makefile | 2 +
 .../drm/bridge/analogix/analogix_dp_core.c | 966 ++--
 .../drm/bridge/analogix/analogix_dp_core.h | 61 +-
 .../gpu/drm/bridge/analogix/analogix_dp_reg.c | 1040 ++--
 .../gpu/drm/bridge/analogix/analogix_dp_reg.h | 99 +-
 drivers/gpu/drm/bridge/display-connector.c | 132 +-
 drivers/gpu/drm/bridge/lontium-lt9611.c | 8 +-
 drivers/gpu/drm/bridge/nwl-dsi.c | 2 +-
 drivers/gpu/drm/bridge/sii902x.c | 280 +-
 drivers/gpu/drm/bridge/synopsys/Makefile | 7 +-
 drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c | 3 +
 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 1744 +++++-
 drivers/gpu/drm/bridge/synopsys/dw-hdmi.h | 55 +
 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 111 +-
 drivers/gpu/drm/drm_atomic_helper.c | 12 +-
 drivers/gpu/drm/drm_atomic_state_helper.c | 7 +
 drivers/gpu/drm/drm_atomic_uapi.c | 14 +
 drivers/gpu/drm/drm_auth.c | 42 +-
 drivers/gpu/drm/drm_color_mgmt.c | 41 +-
 drivers/gpu/drm/drm_crtc_internal.h | 22 +
 drivers/gpu/drm/drm_debugfs.c | 3 +-
 drivers/gpu/drm/drm_edid.c | 122 +-
 drivers/gpu/drm/drm_file.c | 69 +-
 drivers/gpu/drm/drm_fourcc.c | 10 +
 drivers/gpu/drm/drm_ioctl.c | 8 +-
 drivers/gpu/drm/drm_lease.c | 81 +-
 drivers/gpu/drm/drm_mipi_dsi.c | 1 +
 drivers/gpu/drm/drm_mode_config.c | 16 +
 drivers/gpu/drm/drm_modes.c | 2 +
 drivers/gpu/drm/drm_prime.c | 23 +
 drivers/gpu/drm/drm_vblank.c | 9 +-
 .../gpu/drm/hisilicon/kirin/kirin_ade_reg.h | 4 +-
 drivers/gpu/drm/panel/Kconfig | 1 -
 drivers/gpu/drm/panel/Makefile | 2 +
 drivers/gpu/drm/panel/panel-simple.c | 511 +
 drivers/gpu/drm/panfrost/panfrost_device.h | 8 +-
 drivers/gpu/drm/panfrost/panfrost_drv.c | 50 +-
 drivers/gpu/drm/panfrost/panfrost_gem.c | 20 +-
 drivers/gpu/drm/panfrost/panfrost_job.c | 4 +-
 drivers/gpu/drm/panfrost/panfrost_mmu.c | 198 +-
 drivers/gpu/drm/panfrost/panfrost_mmu.h | 5 +-
 drivers/gpu/drm/panfrost/panfrost_regs.h | 2 -
 drivers/gpu/drm/rockchip/Kconfig | 73 +-
 drivers/gpu/drm/rockchip/Makefile | 18 +-
 .../gpu/drm/rockchip/analogix_dp-rockchip.c | 422 +-
 drivers/gpu/drm/rockchip/cdn-dp-core.c | 109 +-
 drivers/gpu/drm/rockchip/cdn-dp-core.h | 5 +-
 .../gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 596 ++-
 drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 3202 ++++++++++-
 drivers/gpu/drm/rockchip/inno_hdmi.c | 2 +-
 drivers/gpu/drm/rockchip/rk3066_hdmi.c | 2 +-
 drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 1454 ++++-
 drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 430 +-
 drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 153 +-
 drivers/gpu/drm/rockchip/rockchip_drm_fb.h | 19 +
 drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 18 +-
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 637 ++-
 drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 47 +-
 drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 4722 +++++++++++++----
 drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 1156 +++-
 drivers/gpu/drm/rockchip/rockchip_lvds.c | 979 ++--
 drivers/gpu/drm/rockchip/rockchip_rgb.c | 557 +-
 drivers/gpu/drm/rockchip/rockchip_rgb.h | 6 +-
 drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 2526 ++++++---
 drivers/gpu/drm/rockchip/rockchip_vop_reg.h | 818 ++-
 drivers/i2c/busses/i2c-rk3x.c | 383 +-
 drivers/i2c/i2c-core-base.c | 51 +-
 drivers/iio/adc/Kconfig | 7 +
 drivers/iio/adc/rockchip_saradc.c | 143 +-
 drivers/input/Kconfig | 2 +
 drivers/input/Makefile | 1 +
 drivers/input/touchscreen/Kconfig | 9 +
 drivers/input/touchscreen/Makefile | 1 +
 drivers/iommu/Kconfig | 3 +-
 drivers/iommu/dma-iommu.c | 50 +
 drivers/iommu/iommu.c | 155 +-
 drivers/iommu/rockchip-iommu.c | 690 ++-
 drivers/irqchip/Kconfig | 5 +-
 drivers/irqchip/irq-gic-v3-its.c | 60 +-
 drivers/irqchip/irq-gic-v3.c | 26 +
 drivers/irqchip/irq-gic-v4.c | 19 +
 drivers/irqchip/irq-meson-gpio.c | 89 +-
 drivers/mailbox/rockchip-mailbox.c | 135 +-
 drivers/media/i2c/Kconfig | 34 +
 drivers/media/i2c/Makefile | 3 +
 drivers/media/platform/Kconfig | 3 +
 drivers/media/platform/Makefile | 3 +
 drivers/media/usb/uvc/uvc_driver.c | 7 +-
 drivers/media/v4l2-core/v4l2-async.c | 54 +
 drivers/mfd/rk808.c | 928 +++-
 drivers/mmc/core/block.c | 3 +
 drivers/mmc/core/block.h | 1 +
 drivers/mmc/core/core.h | 2 +
 drivers/mmc/core/host.c | 58 +-
 drivers/mmc/core/sdio.c | 425 +-
 drivers/mmc/host/Makefile | 1 +
 drivers/mmc/host/dw_mmc-rockchip.c | 95 +-
 drivers/mmc/host/dw_mmc.h | 2 +
 drivers/mmc/host/sdhci-of-dwcmshc.c | 260 +-
 drivers/net/ethernet/stmicro/stmmac/Makefile | 3 +-
 .../net/ethernet/stmicro/stmmac/dwmac-rk.c | 932 +++-
 drivers/net/wireless/Kconfig | 1 +
 drivers/net/wireless/Makefile | 1 +
 drivers/nvmem/core.c | 4 +
 drivers/nvmem/rockchip-efuse.c | 348 +-
 drivers/nvmem/rockchip-otp.c | 22 +-
 drivers/opp/debugfs.c | 44 +
 drivers/opp/of.c | 2 +-
 drivers/pci/controller/Makefile | 6 +-
 drivers/pci/controller/dwc/Kconfig | 9 +
 drivers/pci/controller/dwc/Makefile | 1 +
 drivers/pci/controller/dwc/pcie-designware.h | 1 +
 drivers/pci/controller/pcie-rockchip.c | 5 +
 drivers/phy/rockchip/Kconfig | 100 +
 drivers/phy/rockchip/Makefile | 12 +-
 .../phy/rockchip/phy-rockchip-inno-dsidphy.c | 434 +-
 drivers/phy/rockchip/phy-rockchip-inno-usb2.c | 2597 +++++--
 drivers/phy/rockchip/phy-rockchip-pcie.c | 12 +-
drivers/phy/rockchip/phy-rockchip-typec.c | 733 ++- drivers/phy/rockchip/phy-rockchip-usb.c | 628 ++- drivers/pinctrl/Kconfig | 7 +- drivers/pinctrl/Makefile | 2 + drivers/pinctrl/pinctrl-rk805.c | 350 +- drivers/pinctrl/pinctrl-rockchip.c | 2862 +++++----- drivers/power/reset/gpio-poweroff.c | 1 - drivers/power/supply/Kconfig | 17 +- drivers/power/supply/Makefile | 1 + drivers/pwm/Kconfig | 6 + drivers/pwm/pwm-rockchip.c | 109 +- drivers/pwm/sysfs.c | 43 + drivers/regulator/Kconfig | 10 +- drivers/regulator/Makefile | 1 + drivers/regulator/core.c | 345 ++ drivers/regulator/fan53555.c | 399 +- drivers/regulator/of_regulator.c | 8 +- drivers/regulator/rk808-regulator.c | 534 +- drivers/rtc/rtc-hym8563.c | 64 +- drivers/soc/rockchip/Kconfig | 119 +- drivers/soc/rockchip/Makefile | 15 + drivers/soc/rockchip/grf.c | 65 + drivers/soc/rockchip/io-domain.c | 95 +- drivers/soc/rockchip/pm_domains.c | 870 ++- drivers/spi/spi-rockchip.c | 183 +- drivers/spi/spidev.c | 1 + drivers/staging/blackbox/Kconfig | 11 + drivers/staging/blackbox/Makefile | 2 + drivers/thermal/rockchip_thermal.c | 584 +- drivers/thermal/thermal_core.c | 1 + drivers/thermal/thermal_core.h | 2 - drivers/tty/serial/8250/8250.h | 15 + drivers/tty/serial/8250/8250_core.c | 10 +- drivers/tty/serial/8250/8250_dma.c | 205 +- drivers/tty/serial/8250/8250_dw.c | 92 +- drivers/tty/serial/8250/8250_dwlib.c | 12 + drivers/tty/serial/8250/8250_port.c | 103 +- drivers/tty/vt/keyboard.c | 1 + drivers/usb/core/hub.c | 3 +- drivers/usb/core/quirks.c | 8 + drivers/usb/gadget/Kconfig | 31 + drivers/usb/gadget/composite.c | 104 +- drivers/usb/gadget/configfs.c | 281 +- drivers/usb/gadget/epautoconf.c | 24 + drivers/usb/gadget/function/f_fs.c | 14 +- drivers/usb/gadget/function/f_uvc.c | 448 +- drivers/usb/gadget/function/u_uvc.h | 9 +- drivers/usb/gadget/function/uvc.h | 9 +- drivers/usb/gadget/function/uvc_queue.c | 8 + drivers/usb/gadget/function/uvc_v4l2.c | 24 +- drivers/usb/gadget/function/uvc_video.c | 40 +- drivers/usb/gadget/udc/core.c | 2 +- drivers/usb/storage/scsiglue.c | 4 + drivers/usb/storage/unusual_devs.h | 6 + drivers/usb/storage/unusual_uas.h | 18 + drivers/video/Kconfig | 4 + drivers/video/Makefile | 1 + drivers/video/backlight/pwm_bl.c | 2 + 228 files changed, 37329 insertions(+), 8895 deletions(-) diff --git a/drivers/Makefile b/drivers/Makefile index d0ff4fdb0dfb..ab33503d8d50 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -6,6 +6,8 @@ # Rewritten to use lists instead of if-statements. # +export VENDOR_DRIVER_DIR=../../../../../$(PRODUCT_PATH)/kernel_core/drivers + obj-y += irqchip/ obj-y += bus/ diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index edfe29990bee..24edfa9f85f1 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -2400,12 +2400,6 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info) } dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST); - if (!dev_list) { - nlmsg_free(reply); - ret = -EMSGSIZE; - goto out; - } - if (index == -1) { ret = idr_for_each(&nbd_index_idr, &status_cb, reply); if (ret) { diff --git a/drivers/char/Makefile b/drivers/char/Makefile index ffce287ef415..9d73dd0bafb2 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -3,6 +3,8 @@ # Makefile for the kernel character device drivers. 
# +obj-y += jy.o +obj-y += mcu.o obj-y += mem.o random.o obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o obj-y += misc.o diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index c715d4681a0b..42bb63d80971 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -38,6 +38,13 @@ menuconfig COMMON_CLK if COMMON_CLK +config COMMON_CLK_PROCFS + bool "Common Clock PROCFS interface" + depends on COMMON_CLK && PROC_FS && ARCH_ROCKCHIP + default n + help + Turns on the PROCFS interface for clock. + config COMMON_CLK_WM831X tristate "Clock driver for WM831x/2x PMICs" depends on MFD_WM831X diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b355d3d40f63..81850505aa15 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1313,7 +1313,7 @@ static int __init clk_disable_unused(void) return 0; } -late_initcall_sync(clk_disable_unused); +//late_initcall_sync(clk_disable_unused); static int clk_core_determine_round_nolock(struct clk_core *core, struct clk_rate_request *req) diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig index 47cd6c5de837..4da08276f3df 100644 --- a/drivers/clk/rockchip/Kconfig +++ b/drivers/clk/rockchip/Kconfig @@ -2,7 +2,7 @@ # common clock support for ROCKCHIP SoC family. config COMMON_CLK_ROCKCHIP - bool "Rockchip clock controller common support" + tristate "Rockchip clock controller common support" depends on ARCH_ROCKCHIP default ARCH_ROCKCHIP help @@ -11,68 +11,88 @@ config COMMON_CLK_ROCKCHIP if COMMON_CLK_ROCKCHIP config CLK_PX30 bool "Rockchip PX30 clock controller support" - default y + default n help Build the driver for PX30 Clock Driver. config CLK_RV110X bool "Rockchip RV110x clock controller support" - default y + default n help Build the driver for RV110x Clock Driver. config CLK_RK3036 bool "Rockchip RK3036 clock controller support" - default y + default n help Build the driver for RK3036 Clock Driver. config CLK_RK312X bool "Rockchip RK312x clock controller support" - default y + default n help Build the driver for RK312x Clock Driver. config CLK_RK3188 bool "Rockchip RK3188 clock controller support" - default y + default n help Build the driver for RK3188 Clock Driver. config CLK_RK322X bool "Rockchip RK322x clock controller support" - default y + default n help Build the driver for RK322x Clock Driver. config CLK_RK3288 bool "Rockchip RK3288 clock controller support" depends on ARM - default y + default n help Build the driver for RK3288 Clock Driver. config CLK_RK3308 bool "Rockchip RK3308 clock controller support" - default y + default n help Build the driver for RK3308 Clock Driver. config CLK_RK3328 bool "Rockchip RK3328 clock controller support" - default y + default n help Build the driver for RK3328 Clock Driver. config CLK_RK3368 bool "Rockchip RK3368 clock controller support" - default y + default n help Build the driver for RK3368 Clock Driver. config CLK_RK3399 tristate "Rockchip RK3399 clock controller support" + depends on ARM64 || COMPILE_TEST + depends on CPU_RK3399 default y help Build the driver for RK3399 Clock Driver. + +config ROCKCHIP_CLK_COMPENSATION + bool "Rockchip Clk Compensation" + help + Say y here to enable clk compensation(+/- 1000 ppm). + +config ROCKCHIP_DDRCLK_SIP + bool "Rockchip DDR Clk SIP" + default y if CPU_RK3399 + help + Say y here to enable ddr clk sip. + +config ROCKCHIP_PLL_RK3399 + bool "Rockchip PLL Type RK3399" + default y if CPU_RK3399 || CPU_RV1108 + help + Say y here to enable pll type is rk3399. 
+ endif diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile index a99e4d9bbae1..a966d999fae7 100644 --- a/drivers/clk/rockchip/Makefile +++ b/drivers/clk/rockchip/Makefile @@ -13,6 +13,8 @@ clk-rockchip-y += clk-inverter.o clk-rockchip-y += clk-mmc-phase.o clk-rockchip-y += clk-muxgrf.o clk-rockchip-y += clk-ddr.o +clk-rockchip-y += clk-dclk-divider.o +clk-rockchip-y += clk-pvtm.o clk-rockchip-$(CONFIG_RESET_CONTROLLER) += softrst.o obj-$(CONFIG_CLK_PX30) += clk-px30.o diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c index 0dc478a19451..55416812bed2 100644 --- a/drivers/clk/rockchip/clk-cpu.c +++ b/drivers/clk/rockchip/clk-cpu.c @@ -51,6 +51,7 @@ */ struct rockchip_cpuclk { struct clk_hw hw; + struct clk_hw *pll_hw; struct clk_mux cpu_mux; const struct clk_ops *cpu_mux_ops; @@ -88,10 +89,10 @@ static unsigned long rockchip_cpuclk_recalc_rate(struct clk_hw *hw, { struct rockchip_cpuclk *cpuclk = to_rockchip_cpuclk_hw(hw); const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data; - u32 clksel0 = readl_relaxed(cpuclk->reg_base + reg_data->core_reg); + u32 clksel0 = readl_relaxed(cpuclk->reg_base + reg_data->core_reg[0]); - clksel0 >>= reg_data->div_core_shift; - clksel0 &= reg_data->div_core_mask; + clksel0 >>= reg_data->div_core_shift[0]; + clksel0 &= reg_data->div_core_mask[0]; return parent_rate / (clksel0 + 1); } @@ -124,6 +125,7 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk, const struct rockchip_cpuclk_rate_table *rate; unsigned long alt_prate, alt_div; unsigned long flags; + int i = 0; /* check validity of the new rate */ rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate); @@ -133,6 +135,8 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk, return -EINVAL; } + rockchip_boost_enable_recovery_sw_low(cpuclk->pll_hw); + alt_prate = clk_get_rate(cpuclk->alt_parent); spin_lock_irqsave(cpuclk->lock, flags); @@ -146,10 +150,10 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk, if (alt_prate > ndata->old_rate) { /* calculate dividers */ alt_div = DIV_ROUND_UP(alt_prate, ndata->old_rate) - 1; - if (alt_div > reg_data->div_core_mask) { + if (alt_div > reg_data->div_core_mask[0]) { pr_warn("%s: limiting alt-divider %lu to %d\n", - __func__, alt_div, reg_data->div_core_mask); - alt_div = reg_data->div_core_mask; + __func__, alt_div, reg_data->div_core_mask[0]); + alt_div = reg_data->div_core_mask[0]; } /* @@ -162,20 +166,21 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk, pr_debug("%s: setting div %lu as alt-rate %lu > old-rate %lu\n", __func__, alt_div, alt_prate, ndata->old_rate); - writel(HIWORD_UPDATE(alt_div, reg_data->div_core_mask, - reg_data->div_core_shift) | - HIWORD_UPDATE(reg_data->mux_core_alt, - reg_data->mux_core_mask, - reg_data->mux_core_shift), - cpuclk->reg_base + reg_data->core_reg); - } else { - /* select alternate parent */ - writel(HIWORD_UPDATE(reg_data->mux_core_alt, - reg_data->mux_core_mask, - reg_data->mux_core_shift), - cpuclk->reg_base + reg_data->core_reg); + for (i = 0; i < reg_data->num_cores; i++) { + writel(HIWORD_UPDATE(alt_div, reg_data->div_core_mask[i], + reg_data->div_core_shift[i]), + cpuclk->reg_base + reg_data->core_reg[i]); + } } + rockchip_boost_add_core_div(cpuclk->pll_hw, alt_prate); + + /* select alternate parent */ + writel(HIWORD_UPDATE(reg_data->mux_core_alt, + reg_data->mux_core_mask, + reg_data->mux_core_shift), + cpuclk->reg_base + reg_data->core_reg[0]); + 
spin_unlock_irqrestore(cpuclk->lock, flags); return 0; } @@ -186,6 +191,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk, const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data; const struct rockchip_cpuclk_rate_table *rate; unsigned long flags; + int i = 0; rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate); if (!rate) { @@ -206,16 +212,23 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk, * primary parent by the extra dividers that were needed for the alt. */ - writel(HIWORD_UPDATE(0, reg_data->div_core_mask, - reg_data->div_core_shift) | - HIWORD_UPDATE(reg_data->mux_core_main, - reg_data->mux_core_mask, - reg_data->mux_core_shift), - cpuclk->reg_base + reg_data->core_reg); + writel(HIWORD_UPDATE(reg_data->mux_core_main, + reg_data->mux_core_mask, + reg_data->mux_core_shift), + cpuclk->reg_base + reg_data->core_reg[0]); + + /* remove dividers */ + for (i = 0; i < reg_data->num_cores; i++) { + writel(HIWORD_UPDATE(0, reg_data->div_core_mask[i], + reg_data->div_core_shift[i]), + cpuclk->reg_base + reg_data->core_reg[i]); + } if (ndata->old_rate > ndata->new_rate) rockchip_cpuclk_set_dividers(cpuclk, rate); + rockchip_boost_disable_recovery_sw(cpuclk->pll_hw); + spin_unlock_irqrestore(cpuclk->lock, flags); return 0; } @@ -244,14 +257,16 @@ static int rockchip_cpuclk_notifier_cb(struct notifier_block *nb, } struct clk *rockchip_clk_register_cpuclk(const char *name, - const char *const *parent_names, u8 num_parents, + u8 num_parents, + struct clk *parent, struct clk *alt_parent, const struct rockchip_cpuclk_reg_data *reg_data, const struct rockchip_cpuclk_rate_table *rates, int nrates, void __iomem *reg_base, spinlock_t *lock) { struct rockchip_cpuclk *cpuclk; struct clk_init_data init; - struct clk *clk, *cclk; + struct clk *clk, *cclk, *pll_clk; + const char *parent_name; int ret; if (num_parents < 2) { @@ -259,12 +274,18 @@ struct clk *rockchip_clk_register_cpuclk(const char *name, return ERR_PTR(-EINVAL); } + if (IS_ERR(parent) || IS_ERR(alt_parent)) { + pr_err("%s: invalid parent clock(s)\n", __func__); + return ERR_PTR(-EINVAL); + } + cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL); if (!cpuclk) return ERR_PTR(-ENOMEM); + parent_name = clk_hw_get_name(__clk_get_hw(parent)); init.name = name; - init.parent_names = &parent_names[reg_data->mux_core_main]; + init.parent_names = &parent_name; init.num_parents = 1; init.ops = &rockchip_cpuclk_ops; @@ -281,8 +302,19 @@ struct clk *rockchip_clk_register_cpuclk(const char *name, cpuclk->reg_data = reg_data; cpuclk->clk_nb.notifier_call = rockchip_cpuclk_notifier_cb; cpuclk->hw.init = &init; + if (reg_data->pll_name) { + pll_clk = clk_get_parent(parent); + if (!pll_clk) { + pr_err("%s: could not lookup pll clock: (%s)\n", + __func__, reg_data->pll_name); + ret = -EINVAL; + goto free_cpuclk; + } + cpuclk->pll_hw = __clk_get_hw(pll_clk); + rockchip_boost_init(cpuclk->pll_hw); + } - cpuclk->alt_parent = __clk_lookup(parent_names[reg_data->mux_core_alt]); + cpuclk->alt_parent = alt_parent; if (!cpuclk->alt_parent) { pr_err("%s: could not lookup alternate parent: (%d)\n", __func__, reg_data->mux_core_alt); @@ -297,11 +329,11 @@ struct clk *rockchip_clk_register_cpuclk(const char *name, goto free_cpuclk; } - clk = __clk_lookup(parent_names[reg_data->mux_core_main]); + clk = parent; if (!clk) { pr_err("%s: could not lookup parent clock: (%d) %s\n", __func__, reg_data->mux_core_main, - parent_names[reg_data->mux_core_main]); + parent_name); ret = -EINVAL; goto 
free_alt_parent; } diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c index 86718c54e56b..3c8bcbee2048 100644 --- a/drivers/clk/rockchip/clk-ddr.c +++ b/drivers/clk/rockchip/clk-ddr.c @@ -8,10 +8,20 @@ #include #include #include +#include +#include #include #include +#include +#include +#ifdef CONFIG_ARM +#include +#endif + #include "clk.h" +#define MHZ (1000000) + struct rockchip_ddrclk { struct clk_hw hw; void __iomem *reg_base; @@ -21,25 +31,47 @@ struct rockchip_ddrclk { int div_shift; int div_width; int ddr_flag; - spinlock_t *lock; }; #define to_rockchip_ddrclk_hw(hw) container_of(hw, struct rockchip_ddrclk, hw) +struct share_params_ddrclk { + u32 hz; + u32 lcdc_type; +}; + +struct rockchip_ddrclk_data { + void __iomem *params; + int (*dmcfreq_wait_complete)(void); +}; + +static struct rockchip_ddrclk_data ddr_data = {NULL, NULL}; + +void rockchip_set_ddrclk_params(void __iomem *params) +{ + ddr_data.params = params; +} +EXPORT_SYMBOL(rockchip_set_ddrclk_params); + +void rockchip_set_ddrclk_dmcfreq_wait_complete(int (*func)(void)) +{ + ddr_data.dmcfreq_wait_complete = func; +} +EXPORT_SYMBOL(rockchip_set_ddrclk_dmcfreq_wait_complete); + static int rockchip_ddrclk_sip_set_rate(struct clk_hw *hw, unsigned long drate, unsigned long prate) { - struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw); - unsigned long flags; struct arm_smccc_res res; - spin_lock_irqsave(ddrclk->lock, flags); arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, drate, 0, ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE, 0, 0, 0, 0, &res); - spin_unlock_irqrestore(ddrclk->lock, flags); - return res.a0; + if (res.a0) + return 0; + else + return -EPERM; } static unsigned long @@ -87,18 +119,134 @@ static const struct clk_ops rockchip_ddrclk_sip_ops = { .get_parent = rockchip_ddrclk_get_parent, }; +static u32 ddr_clk_cached; + +static int rockchip_ddrclk_scpi_set_rate(struct clk_hw *hw, unsigned long drate, + unsigned long prate) +{ + u32 ret; + u32 lcdc_type = 0; + struct share_params_ddrclk *p; + + p = (struct share_params_ddrclk *)ddr_data.params; + if (p) + lcdc_type = p->lcdc_type; + + ret = scpi_ddr_set_clk_rate(drate / MHZ, lcdc_type); + if (ret) { + ddr_clk_cached = ret; + ret = 0; + } else { + ddr_clk_cached = 0; + ret = -1; + } + + return ret; +} + +static unsigned long rockchip_ddrclk_scpi_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + if (ddr_clk_cached) + return (MHZ * ddr_clk_cached); + else + return (MHZ * scpi_ddr_get_clk_rate()); +} + +static long rockchip_ddrclk_scpi_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *prate) +{ + rate = rate / MHZ; + rate = (rate / 12) * 12; + + return (rate * MHZ); +} + +static const struct clk_ops rockchip_ddrclk_scpi_ops = { + .recalc_rate = rockchip_ddrclk_scpi_recalc_rate, + .set_rate = rockchip_ddrclk_scpi_set_rate, + .round_rate = rockchip_ddrclk_scpi_round_rate, + .get_parent = rockchip_ddrclk_get_parent, +}; + +static int rockchip_ddrclk_sip_set_rate_v2(struct clk_hw *hw, + unsigned long drate, + unsigned long prate) +{ + struct share_params_ddrclk *p; + struct arm_smccc_res res; + + p = (struct share_params_ddrclk *)ddr_data.params; + if (p) + p->hz = drate; + + res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, + ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE); + + if ((int)res.a1 == SIP_RET_SET_RATE_TIMEOUT) { + if (ddr_data.dmcfreq_wait_complete) + ddr_data.dmcfreq_wait_complete(); + } + + return res.a0; +} + +static unsigned long rockchip_ddrclk_sip_recalc_rate_v2 + (struct clk_hw *hw, unsigned long parent_rate) +{ + struct 
arm_smccc_res res; + + res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, + ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE); + if (!res.a0) + return res.a1; + else + return 0; +} + +static long rockchip_ddrclk_sip_round_rate_v2(struct clk_hw *hw, + unsigned long rate, + unsigned long *prate) +{ + struct share_params_ddrclk *p; + struct arm_smccc_res res; + + p = (struct share_params_ddrclk *)ddr_data.params; + if (p) + p->hz = rate; + + res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, + ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE); + if (!res.a0) + return res.a1; + else + return 0; +} + +static const struct clk_ops rockchip_ddrclk_sip_ops_v2 = { + .recalc_rate = rockchip_ddrclk_sip_recalc_rate_v2, + .set_rate = rockchip_ddrclk_sip_set_rate_v2, + .round_rate = rockchip_ddrclk_sip_round_rate_v2, + .get_parent = rockchip_ddrclk_get_parent, +}; + struct clk *rockchip_clk_register_ddrclk(const char *name, int flags, const char *const *parent_names, u8 num_parents, int mux_offset, int mux_shift, int mux_width, int div_shift, int div_width, - int ddr_flag, void __iomem *reg_base, - spinlock_t *lock) + int ddr_flag, void __iomem *reg_base) { struct rockchip_ddrclk *ddrclk; struct clk_init_data init; struct clk *clk; +#ifdef CONFIG_ARM + if (!psci_smp_available()) + return NULL; +#endif + ddrclk = kzalloc(sizeof(*ddrclk), GFP_KERNEL); if (!ddrclk) return ERR_PTR(-ENOMEM); @@ -114,6 +262,12 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags, case ROCKCHIP_DDRCLK_SIP: init.ops = &rockchip_ddrclk_sip_ops; break; + case ROCKCHIP_DDRCLK_SCPI: + init.ops = &rockchip_ddrclk_scpi_ops; + break; + case ROCKCHIP_DDRCLK_SIP_V2: + init.ops = &rockchip_ddrclk_sip_ops_v2; + break; default: pr_err("%s: unsupported ddrclk type %d\n", __func__, ddr_flag); kfree(ddrclk); @@ -121,7 +275,6 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags, } ddrclk->reg_base = reg_base; - ddrclk->lock = lock; ddrclk->hw.init = &init; ddrclk->mux_offset = mux_offset; ddrclk->mux_shift = mux_shift; diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c index ccd5c270c213..b978af08d84f 100644 --- a/drivers/clk/rockchip/clk-half-divider.c +++ b/drivers/clk/rockchip/clk-half-divider.c @@ -14,9 +14,9 @@ static bool _is_best_half_div(unsigned long rate, unsigned long now, unsigned long best, unsigned long flags) { if (flags & CLK_DIVIDER_ROUND_CLOSEST) - return abs(rate - now) < abs(rate - best); + return abs(rate - now) <= abs(rate - best); - return now <= rate && now > best; + return now <= rate && now >= best; } static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw, @@ -38,7 +38,7 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate, { unsigned int i, bestdiv = 0; unsigned long parent_rate, best = 0, now, maxdiv; - unsigned long parent_rate_saved = *best_parent_rate; + bool is_bestdiv = false; if (!rate) rate = 1; @@ -51,7 +51,7 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (bestdiv < 3) bestdiv = 0; else - bestdiv = (bestdiv - 3) / 2; + bestdiv = DIV_ROUND_UP(bestdiv - 3, 2); bestdiv = bestdiv > maxdiv ? 
maxdiv : bestdiv; return bestdiv; } @@ -63,28 +63,20 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate, maxdiv = min(ULONG_MAX / rate, maxdiv); for (i = 0; i <= maxdiv; i++) { - if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) { - /* - * It's the most ideal case if the requested rate can be - * divided from parent clock without needing to change - * parent rate, so return the divider immediately. - */ - *best_parent_rate = parent_rate_saved; - return i; - } parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), ((u64)rate * (i * 2 + 3)) / 2); now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), (i * 2 + 3)); if (_is_best_half_div(rate, now, best, flags)) { + is_bestdiv = true; bestdiv = i; best = now; *best_parent_rate = parent_rate; } } - if (!bestdiv) { + if (!is_bestdiv) { bestdiv = div_mask(width); *best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1); } @@ -114,7 +106,7 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate, u32 val; value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate); - value = (value - 3) / 2; + value = DIV_ROUND_UP(value - 3, 2); value = min_t(unsigned int, value, div_mask(divider->width)); if (divider->lock) @@ -160,10 +152,10 @@ struct clk *rockchip_clk_register_halfdiv(const char *name, u8 num_parents, void __iomem *base, int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags, - u8 div_shift, u8 div_width, - u8 div_flags, int gate_offset, - u8 gate_shift, u8 gate_flags, - unsigned long flags, + int div_offset, u8 div_shift, + u8 div_width, u8 div_flags, + int gate_offset, u8 gate_shift, + u8 gate_flags, unsigned long flags, spinlock_t *lock) { struct clk_hw *hw = ERR_PTR(-ENOMEM); @@ -205,7 +197,10 @@ struct clk *rockchip_clk_register_halfdiv(const char *name, goto err_div; div->flags = div_flags; - div->reg = base + muxdiv_offset; + if (div_offset) + div->reg = base + div_offset; + else + div->reg = base + muxdiv_offset; div->shift = div_shift; div->width = div_width; div->lock = lock; diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index d0bd513ff3c3..c7e28d95f8a0 100644 --- a/drivers/clk/rockchip/clk-pll.c +++ b/drivers/clk/rockchip/clk-pll.c @@ -15,6 +15,9 @@ #include #include #include +#include +#include +#include #include "clk.h" #define PLL_MODE_MASK 0x3 @@ -38,15 +41,291 @@ struct rockchip_clk_pll { u8 flags; const struct rockchip_pll_rate_table *rate_table; unsigned int rate_count; + int sel; + unsigned long scaling; spinlock_t *lock; struct rockchip_clk_provider *ctx; + + bool boost_enabled; + u32 boost_backup_pll_usage; + unsigned long boost_backup_pll_rate; + unsigned long boost_low_rate; + unsigned long boost_high_rate; + struct regmap *boost; +#ifdef CONFIG_DEBUG_FS + struct hlist_node debug_node; +#endif }; #define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw) #define to_rockchip_clk_pll_nb(nb) \ container_of(nb, struct rockchip_clk_pll, clk_nb) +static void rockchip_boost_disable_low(struct rockchip_clk_pll *pll); + +#define MHZ (1000UL * 1000UL) +#define KHZ (1000UL) + +/* CLK_PLL_TYPE_RK3066_AUTO type ops */ +#define PLL_FREF_MIN (269 * KHZ) +#define PLL_FREF_MAX (2200 * MHZ) + +#define PLL_FVCO_MIN (440 * MHZ) +#define PLL_FVCO_MAX (2200 * MHZ) + +#define PLL_FOUT_MIN (27500 * KHZ) +#define PLL_FOUT_MAX (2200 * MHZ) + +#define PLL_NF_MAX (4096) +#define PLL_NR_MAX (64) +#define PLL_NO_MAX (16) + +/* CLK_PLL_TYPE_RK3036/3366/3399_AUTO type ops */ +#define MIN_FOUTVCO_FREQ (800 * MHZ) +#define MAX_FOUTVCO_FREQ (2000 
* MHZ) + +static struct rockchip_pll_rate_table auto_table; +#ifdef CONFIG_DEBUG_FS +static HLIST_HEAD(clk_boost_list); +static DEFINE_MUTEX(clk_boost_lock); +#endif + +int rockchip_pll_clk_adaptive_scaling(struct clk *clk, int sel) +{ + struct clk *parent = clk_get_parent(clk); + struct rockchip_clk_pll *pll; + + if (IS_ERR_OR_NULL(parent)) + return -EINVAL; + + pll = to_rockchip_clk_pll(__clk_get_hw(parent)); + if (!pll) + return -EINVAL; + + pll->sel = sel; + + return 0; +} +EXPORT_SYMBOL(rockchip_pll_clk_adaptive_scaling); + +int rockchip_pll_clk_rate_to_scale(struct clk *clk, unsigned long rate) +{ + const struct rockchip_pll_rate_table *rate_table; + struct clk *parent = clk_get_parent(clk); + struct rockchip_clk_pll *pll; + unsigned int i; + + if (IS_ERR_OR_NULL(parent)) + return -EINVAL; + + pll = to_rockchip_clk_pll(__clk_get_hw(parent)); + if (!pll) + return -EINVAL; + + rate_table = pll->rate_table; + for (i = 0; i < pll->rate_count; i++) { + if (rate >= rate_table[i].rate) + return i; + } + + return -EINVAL; +} +EXPORT_SYMBOL(rockchip_pll_clk_rate_to_scale); + +int rockchip_pll_clk_scale_to_rate(struct clk *clk, unsigned int scale) +{ + const struct rockchip_pll_rate_table *rate_table; + struct clk *parent = clk_get_parent(clk); + struct rockchip_clk_pll *pll; + unsigned int i; + + if (IS_ERR_OR_NULL(parent)) + return -EINVAL; + + pll = to_rockchip_clk_pll(__clk_get_hw(parent)); + if (!pll) + return -EINVAL; + + rate_table = pll->rate_table; + for (i = 0; i < pll->rate_count; i++) { + if (i == scale) + return rate_table[i].rate; + } + + return -EINVAL; +} +EXPORT_SYMBOL(rockchip_pll_clk_scale_to_rate); + +static struct rockchip_pll_rate_table *rk_pll_rate_table_get(void) +{ + return &auto_table; +} + +static int rockchip_pll_clk_set_postdiv(unsigned long fout_hz, + u32 *postdiv1, + u32 *postdiv2, + u32 *foutvco) +{ + unsigned long freq; + + if (fout_hz < MIN_FOUTVCO_FREQ) { + for (*postdiv1 = 1; *postdiv1 <= 7; (*postdiv1)++) { + for (*postdiv2 = 1; *postdiv2 <= 7; (*postdiv2)++) { + freq = fout_hz * (*postdiv1) * (*postdiv2); + if (freq >= MIN_FOUTVCO_FREQ && + freq <= MAX_FOUTVCO_FREQ) { + *foutvco = freq; + return 0; + } + } + } + pr_err("CANNOT FIND postdiv1/2 to make fout in range from 800M to 2000M,fout = %lu\n", + fout_hz); + } else { + *postdiv1 = 1; + *postdiv2 = 1; + } + return 0; +} + +static struct rockchip_pll_rate_table * +rockchip_pll_clk_set_by_auto(struct rockchip_clk_pll *pll, + unsigned long fin_hz, + unsigned long fout_hz) +{ + struct rockchip_pll_rate_table *rate_table = rk_pll_rate_table_get(); + /* FIXME set postdiv1/2 always 1*/ + u32 foutvco = fout_hz; + u64 fin_64, frac_64; + u32 f_frac, postdiv1, postdiv2; + unsigned long clk_gcd = 0; + + if (fin_hz == 0 || fout_hz == 0 || fout_hz == fin_hz) + return NULL; + + rockchip_pll_clk_set_postdiv(fout_hz, &postdiv1, &postdiv2, &foutvco); + rate_table->postdiv1 = postdiv1; + rate_table->postdiv2 = postdiv2; + rate_table->dsmpd = 1; + + if (fin_hz / MHZ * MHZ == fin_hz && fout_hz / MHZ * MHZ == fout_hz) { + fin_hz /= MHZ; + foutvco /= MHZ; + clk_gcd = gcd(fin_hz, foutvco); + rate_table->refdiv = fin_hz / clk_gcd; + rate_table->fbdiv = foutvco / clk_gcd; + + rate_table->frac = 0; + + pr_debug("fin = %lu, fout = %lu, clk_gcd = %lu, refdiv = %u, fbdiv = %u, postdiv1 = %u, postdiv2 = %u, frac = %u\n", + fin_hz, fout_hz, clk_gcd, rate_table->refdiv, + rate_table->fbdiv, rate_table->postdiv1, + rate_table->postdiv2, rate_table->frac); + } else { + pr_debug("frac div running, fin_hz = %lu, fout_hz = %lu, fin_INT_mhz = 
%lu, fout_INT_mhz = %lu\n", + fin_hz, fout_hz, + fin_hz / MHZ * MHZ, + fout_hz / MHZ * MHZ); + pr_debug("frac get postdiv1 = %u, postdiv2 = %u, foutvco = %u\n", + rate_table->postdiv1, rate_table->postdiv2, foutvco); + clk_gcd = gcd(fin_hz / MHZ, foutvco / MHZ); + rate_table->refdiv = fin_hz / MHZ / clk_gcd; + rate_table->fbdiv = foutvco / MHZ / clk_gcd; + pr_debug("frac get refdiv = %u, fbdiv = %u\n", + rate_table->refdiv, rate_table->fbdiv); + + rate_table->frac = 0; + + f_frac = (foutvco % MHZ); + fin_64 = fin_hz; + do_div(fin_64, (u64)rate_table->refdiv); + frac_64 = (u64)f_frac << 24; + do_div(frac_64, fin_64); + rate_table->frac = (u32)frac_64; + if (rate_table->frac > 0) + rate_table->dsmpd = 0; + pr_debug("frac = %x\n", rate_table->frac); + } + return rate_table; +} + +static struct rockchip_pll_rate_table * +rockchip_rk3066_pll_clk_set_by_auto(struct rockchip_clk_pll *pll, + unsigned long fin_hz, + unsigned long fout_hz) +{ + struct rockchip_pll_rate_table *rate_table = rk_pll_rate_table_get(); + u32 nr, nf, no, nonr; + u32 nr_out, nf_out, no_out; + u32 n; + u32 numerator, denominator; + u64 fref, fvco, fout; + unsigned long clk_gcd = 0; + + nr_out = PLL_NR_MAX + 1; + no_out = 0; + nf_out = 0; + + if (fin_hz == 0 || fout_hz == 0 || fout_hz == fin_hz) + return NULL; + + clk_gcd = gcd(fin_hz, fout_hz); + + numerator = fout_hz / clk_gcd; + denominator = fin_hz / clk_gcd; + + for (n = 1;; n++) { + nf = numerator * n; + nonr = denominator * n; + if (nf > PLL_NF_MAX || nonr > (PLL_NO_MAX * PLL_NR_MAX)) + break; + + for (no = 1; no <= PLL_NO_MAX; no++) { + if (!(no == 1 || !(no % 2))) + continue; + + if (nonr % no) + continue; + nr = nonr / no; + + if (nr > PLL_NR_MAX) + continue; + + fref = fin_hz / nr; + if (fref < PLL_FREF_MIN || fref > PLL_FREF_MAX) + continue; + + fvco = fref * nf; + if (fvco < PLL_FVCO_MIN || fvco > PLL_FVCO_MAX) + continue; + + fout = fvco / no; + if (fout < PLL_FOUT_MIN || fout > PLL_FOUT_MAX) + continue; + + /* select the best from all available PLL settings */ + if ((no > no_out) || + ((no == no_out) && (nr < nr_out))) { + nr_out = nr; + nf_out = nf; + no_out = no; + } + } + } + + /* output the best PLL setting */ + if ((nr_out <= PLL_NR_MAX) && (no_out > 0)) { + rate_table->nr = nr_out; + rate_table->nf = nf_out; + rate_table->no = no_out; + } else { + return NULL; + } + + return rate_table; +} + static const struct rockchip_pll_rate_table *rockchip_get_pll_settings( struct rockchip_clk_pll *pll, unsigned long rate) { @@ -54,28 +333,27 @@ static const struct rockchip_pll_rate_table *rockchip_get_pll_settings( int i; for (i = 0; i < pll->rate_count; i++) { - if (rate == rate_table[i].rate) + if (rate == rate_table[i].rate) { + if (i < pll->sel) { + pll->scaling = rate; + return &rate_table[pll->sel]; + } + pll->scaling = 0; return &rate_table[i]; + } } + pll->scaling = 0; - return NULL; + if (pll->type == pll_rk3066) + return rockchip_rk3066_pll_clk_set_by_auto(pll, 24 * MHZ, rate); + else + return rockchip_pll_clk_set_by_auto(pll, 24 * MHZ, rate); } static long rockchip_pll_round_rate(struct clk_hw *hw, unsigned long drate, unsigned long *prate) { - struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); - const struct rockchip_pll_rate_table *rate_table = pll->rate_table; - int i; - - /* Assumming rate_table is in descending order */ - for (i = 0; i < pll->rate_count; i++) { - if (drate >= rate_table[i].rate) - return rate_table[i].rate; - } - - /* return minimum supported value */ - return rate_table[i - 1].rate; + return drate; } /* @@ -136,6 +414,30 @@ 
static int rockchip_rk3036_pll_wait_lock(struct rockchip_clk_pll *pll) return ret; } +static unsigned long +rockchip_rk3036_pll_con_to_rate(struct rockchip_clk_pll *pll, + u32 con0, u32 con1) +{ + unsigned int fbdiv, postdiv1, refdiv, postdiv2; + u64 rate64 = 24000000; + + fbdiv = ((con0 >> RK3036_PLLCON0_FBDIV_SHIFT) & + RK3036_PLLCON0_FBDIV_MASK); + postdiv1 = ((con0 >> RK3036_PLLCON0_POSTDIV1_SHIFT) & + RK3036_PLLCON0_POSTDIV1_MASK); + refdiv = ((con1 >> RK3036_PLLCON1_REFDIV_SHIFT) & + RK3036_PLLCON1_REFDIV_MASK); + postdiv2 = ((con1 >> RK3036_PLLCON1_POSTDIV2_SHIFT) & + RK3036_PLLCON1_POSTDIV2_MASK); + + rate64 *= fbdiv; + do_div(rate64, refdiv); + do_div(rate64, postdiv1); + do_div(rate64, postdiv2); + + return (unsigned long)rate64; +} + static void rockchip_rk3036_pll_get_params(struct rockchip_clk_pll *pll, struct rockchip_pll_rate_table *rate) { @@ -165,7 +467,10 @@ static unsigned long rockchip_rk3036_pll_recalc_rate(struct clk_hw *hw, { struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); struct rockchip_pll_rate_table cur; - u64 rate64 = prate; + u64 rate64 = prate, frac_rate64 = prate; + + if (pll->sel && pll->scaling) + return pll->scaling; rockchip_rk3036_pll_get_params(pll, &cur); @@ -174,7 +479,7 @@ static unsigned long rockchip_rk3036_pll_recalc_rate(struct clk_hw *hw, if (cur.dsmpd == 0) { /* fractional mode */ - u64 frac_rate64 = prate * cur.frac; + frac_rate64 *= cur.frac; do_div(frac_rate64, cur.refdiv); rate64 += frac_rate64 >> 24; @@ -231,6 +536,8 @@ static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll, pllcon |= rate->frac << RK3036_PLLCON2_FRAC_SHIFT; writel_relaxed(pllcon, pll->reg_base + RK3036_PLLCON(2)); + rockchip_boost_disable_low(pll); + /* wait for the pll to lock */ ret = rockchip_rk3036_pll_wait_lock(pll); if (ret) { @@ -412,6 +719,9 @@ static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw, return prate; } + if (pll->sel && pll->scaling) + return pll->scaling; + rockchip_rk3066_pll_get_params(pll, &cur); rate64 *= cur.nf; @@ -485,9 +795,18 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate, { struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); const struct rockchip_pll_rate_table *rate; + unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate); + struct regmap *grf = pll->ctx->grf; + int ret; - pr_debug("%s: changing %s to %lu with a parent rate of %lu\n", - __func__, clk_hw_get_name(hw), drate, prate); + if (IS_ERR(grf)) { + pr_debug("%s: grf regmap not available, aborting rate change\n", + __func__); + return PTR_ERR(grf); + } + + pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n", + __func__, clk_hw_get_name(hw), old_rate, drate, prate); /* Get required rate settings from table */ rate = rockchip_get_pll_settings(pll, drate); @@ -497,7 +816,11 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate, return -EINVAL; } - return rockchip_rk3066_pll_set_params(pll, rate); + ret = rockchip_rk3066_pll_set_params(pll, rate); + if (ret) + pll->scaling = 0; + + return ret; } static int rockchip_rk3066_pll_enable(struct clk_hw *hw) @@ -649,6 +972,9 @@ static unsigned long rockchip_rk3399_pll_recalc_rate(struct clk_hw *hw, struct rockchip_pll_rate_table cur; u64 rate64 = prate; + if (pll->sel && pll->scaling) + return pll->scaling; + rockchip_rk3399_pll_get_params(pll, &cur); rate64 *= cur.fbdiv; @@ -692,6 +1018,11 @@ static int rockchip_rk3399_pll_set_params(struct rockchip_clk_pll *pll, rate_change_remuxed = 1; } + /* set pll 
power down */ + writel(HIWORD_UPDATE(RK3399_PLLCON3_PWRDOWN, + RK3399_PLLCON3_PWRDOWN, 0), + pll->reg_base + RK3399_PLLCON(3)); + /* update pll values */ writel_relaxed(HIWORD_UPDATE(rate->fbdiv, RK3399_PLLCON0_FBDIV_MASK, RK3399_PLLCON0_FBDIV_SHIFT), @@ -715,6 +1046,11 @@ static int rockchip_rk3399_pll_set_params(struct rockchip_clk_pll *pll, RK3399_PLLCON3_DSMPD_SHIFT), pll->reg_base + RK3399_PLLCON(3)); + /* set pll power up */ + writel(HIWORD_UPDATE(0, + RK3399_PLLCON3_PWRDOWN, 0), + pll->reg_base + RK3399_PLLCON(3)); + /* wait for the pll to lock */ ret = rockchip_rk3399_pll_wait_lock(pll); if (ret) { @@ -734,9 +1070,11 @@ static int rockchip_rk3399_pll_set_rate(struct clk_hw *hw, unsigned long drate, { struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); const struct rockchip_pll_rate_table *rate; + unsigned long old_rate = rockchip_rk3399_pll_recalc_rate(hw, prate); + int ret; - pr_debug("%s: changing %s to %lu with a parent rate of %lu\n", - __func__, __clk_get_name(hw->clk), drate, prate); + pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n", + __func__, __clk_get_name(hw->clk), old_rate, drate, prate); /* Get required rate settings from table */ rate = rockchip_get_pll_settings(pll, drate); @@ -746,7 +1084,11 @@ static int rockchip_rk3399_pll_set_rate(struct clk_hw *hw, unsigned long drate, return -EINVAL; } - return rockchip_rk3399_pll_set_params(pll, rate); + ret = rockchip_rk3399_pll_set_params(pll, rate); + if (ret) + pll->scaling = 0; + + return ret; } static int rockchip_rk3399_pll_enable(struct clk_hw *hw) @@ -842,6 +1184,80 @@ static const struct clk_ops rockchip_rk3399_pll_clk_ops = { .init = rockchip_rk3399_pll_init, }; +#ifdef CONFIG_ROCKCHIP_CLK_COMPENSATION +int rockchip_pll_clk_compensation(struct clk *clk, int ppm) +{ + struct clk *parent = clk_get_parent(clk); + struct rockchip_clk_pll *pll; + static u32 frac, fbdiv; + bool negative; + u32 pllcon, pllcon0, pllcon2, fbdiv_mask, frac_mask, frac_shift; + u64 fracdiv, m, n; + + if ((ppm > 1000) || (ppm < -1000)) + return -EINVAL; + + if (IS_ERR_OR_NULL(parent)) + return -EINVAL; + + pll = to_rockchip_clk_pll(__clk_get_hw(parent)); + if (!pll) + return -EINVAL; + + switch (pll->type) { + case pll_rk3036: + case pll_rk3328: + pllcon0 = RK3036_PLLCON(0); + pllcon2 = RK3036_PLLCON(2); + fbdiv_mask = RK3036_PLLCON0_FBDIV_MASK; + frac_mask = RK3036_PLLCON2_FRAC_MASK; + frac_shift = RK3036_PLLCON2_FRAC_SHIFT; + break; + case pll_rk3066: + return -EINVAL; + case pll_rk3399: + pllcon0 = RK3399_PLLCON(0); + pllcon2 = RK3399_PLLCON(2); + fbdiv_mask = RK3399_PLLCON0_FBDIV_MASK; + frac_mask = RK3399_PLLCON2_FRAC_MASK; + frac_shift = RK3399_PLLCON2_FRAC_SHIFT; + break; + default: + return -EINVAL; + } + + negative = !!(ppm & BIT(31)); + ppm = negative ? ~ppm + 1 : ppm; + + if (!frac) { + frac = readl_relaxed(pll->reg_base + pllcon2) & frac_mask; + fbdiv = readl_relaxed(pll->reg_base + pllcon0) & fbdiv_mask; + } + + /* + * delta frac frac ppm + * -------------- = (fbdiv + ----------) * --------- + * 1 << 24 1 << 24 1000000 + * + */ + m = div64_u64((uint64_t)frac * ppm, 1000000); + n = div64_u64((uint64_t)ppm << 24, 1000000) * fbdiv; + + fracdiv = negative ? 
frac - (m + n) : frac + (m + n); + + if (!frac || fracdiv > frac_mask) + return -EINVAL; + + pllcon = readl_relaxed(pll->reg_base + pllcon2); + pllcon &= ~(frac_mask << frac_shift); + pllcon |= fracdiv << frac_shift; + writel_relaxed(pllcon, pll->reg_base + pllcon2); + + return 0; +} +EXPORT_SYMBOL(rockchip_pll_clk_compensation); +#endif + /* * Common registering of pll clocks */ @@ -914,8 +1330,12 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, /* now create the actual pll */ init.name = pll_name; +#ifndef CONFIG_ROCKCHIP_LOW_PERFORMANCE /* keep all plls untouched for now */ init.flags = flags | CLK_IGNORE_UNUSED; +#else + init.flags = flags; +#endif init.parent_names = &parent_names[0]; init.num_parents = 1; @@ -940,7 +1360,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, switch (pll_type) { case pll_rk3036: case pll_rk3328: - if (!pll->rate_table) + if (!pll->rate_table || IS_ERR(ctx->grf)) init.ops = &rockchip_rk3036_pll_clk_norate_ops; else init.ops = &rockchip_rk3036_pll_clk_ops; @@ -988,3 +1408,316 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, kfree(pll); return mux_clk; } + +static unsigned long rockchip_pll_con_to_rate(struct rockchip_clk_pll *pll, + u32 con0, u32 con1) +{ + switch (pll->type) { + case pll_rk3036: + case pll_rk3328: + return rockchip_rk3036_pll_con_to_rate(pll, con0, con1); + case pll_rk3066: + break; + case pll_rk3399: + break; + default: + pr_warn("%s: Unknown pll type\n", __func__); + } + + return 0; +} + +void rockchip_boost_init(struct clk_hw *hw) +{ + struct rockchip_clk_pll *pll; + struct device_node *np; + u32 value, con0, con1; + + if (!hw) + return; + pll = to_rockchip_clk_pll(hw); + np = of_parse_phandle(pll->ctx->cru_node, "rockchip,boost", 0); + if (!np) { + pr_debug("%s: failed to get boost np\n", __func__); + return; + } + pll->boost = syscon_node_to_regmap(np); + if (IS_ERR(pll->boost)) { + pr_debug("%s: failed to get boost regmap\n", __func__); + return; + } + + if (!of_property_read_u32(np, "rockchip,boost-low-con0", &con0) && + !of_property_read_u32(np, "rockchip,boost-low-con1", &con1)) { + pr_debug("boost-low-con=0x%x 0x%x\n", con0, con1); + regmap_write(pll->boost, BOOST_PLL_L_CON(0), + HIWORD_UPDATE(con0, BOOST_PLL_CON_MASK, 0)); + regmap_write(pll->boost, BOOST_PLL_L_CON(1), + HIWORD_UPDATE(con1, BOOST_PLL_CON_MASK, 0)); + pll->boost_low_rate = rockchip_pll_con_to_rate(pll, con0, + con1); + pr_debug("boost-low-rate=%lu\n", pll->boost_low_rate); + } + if (!of_property_read_u32(np, "rockchip,boost-high-con0", &con0) && + !of_property_read_u32(np, "rockchip,boost-high-con1", &con1)) { + pr_debug("boost-high-con=0x%x 0x%x\n", con0, con1); + regmap_write(pll->boost, BOOST_PLL_H_CON(0), + HIWORD_UPDATE(con0, BOOST_PLL_CON_MASK, 0)); + regmap_write(pll->boost, BOOST_PLL_H_CON(1), + HIWORD_UPDATE(con1, BOOST_PLL_CON_MASK, 0)); + pll->boost_high_rate = rockchip_pll_con_to_rate(pll, con0, + con1); + pr_debug("boost-high-rate=%lu\n", pll->boost_high_rate); + } + if (!of_property_read_u32(np, "rockchip,boost-backup-pll", &value)) { + pr_debug("boost-backup-pll=0x%x\n", value); + regmap_write(pll->boost, BOOST_CLK_CON, + HIWORD_UPDATE(value, BOOST_BACKUP_PLL_MASK, + BOOST_BACKUP_PLL_SHIFT)); + } + if (!of_property_read_u32(np, "rockchip,boost-backup-pll-usage", + &pll->boost_backup_pll_usage)) { + pr_debug("boost-backup-pll-usage=0x%x\n", + pll->boost_backup_pll_usage); + regmap_write(pll->boost, BOOST_CLK_CON, + HIWORD_UPDATE(pll->boost_backup_pll_usage, + 
BOOST_BACKUP_PLL_USAGE_MASK, + BOOST_BACKUP_PLL_USAGE_SHIFT)); + } + if (!of_property_read_u32(np, "rockchip,boost-switch-threshold", + &value)) { + pr_debug("boost-switch-threshold=0x%x\n", value); + regmap_write(pll->boost, BOOST_SWITCH_THRESHOLD, value); + } + if (!of_property_read_u32(np, "rockchip,boost-statis-threshold", + &value)) { + pr_debug("boost-statis-threshold=0x%x\n", value); + regmap_write(pll->boost, BOOST_STATIS_THRESHOLD, value); + } + if (!of_property_read_u32(np, "rockchip,boost-statis-enable", + &value)) { + pr_debug("boost-statis-enable=0x%x\n", value); + regmap_write(pll->boost, BOOST_BOOST_CON, + HIWORD_UPDATE(value, BOOST_STATIS_ENABLE_MASK, + BOOST_STATIS_ENABLE_SHIFT)); + } + if (!of_property_read_u32(np, "rockchip,boost-enable", &value)) { + pr_debug("boost-enable=0x%x\n", value); + regmap_write(pll->boost, BOOST_BOOST_CON, + HIWORD_UPDATE(value, BOOST_ENABLE_MASK, + BOOST_ENABLE_SHIFT)); + if (value) + pll->boost_enabled = true; + } +#ifdef CONFIG_DEBUG_FS + if (pll->boost_enabled) { + mutex_lock(&clk_boost_lock); + hlist_add_head(&pll->debug_node, &clk_boost_list); + mutex_unlock(&clk_boost_lock); + } +#endif +} + +void rockchip_boost_enable_recovery_sw_low(struct clk_hw *hw) +{ + struct rockchip_clk_pll *pll; + unsigned int val; + + if (!hw) + return; + pll = to_rockchip_clk_pll(hw); + if (!pll->boost_enabled) + return; + + regmap_write(pll->boost, BOOST_BOOST_CON, + HIWORD_UPDATE(1, BOOST_RECOVERY_MASK, + BOOST_RECOVERY_SHIFT)); + do { + regmap_read(pll->boost, BOOST_FSM_STATUS, &val); + } while (!(val & BOOST_BUSY_STATE)); + + regmap_write(pll->boost, BOOST_BOOST_CON, + HIWORD_UPDATE(1, BOOST_SW_CTRL_MASK, + BOOST_SW_CTRL_SHIFT) | + HIWORD_UPDATE(1, BOOST_LOW_FREQ_EN_MASK, + BOOST_LOW_FREQ_EN_SHIFT)); +} + +static void rockchip_boost_disable_low(struct rockchip_clk_pll *pll) +{ + if (!pll->boost_enabled) + return; + + regmap_write(pll->boost, BOOST_BOOST_CON, + HIWORD_UPDATE(0, BOOST_LOW_FREQ_EN_MASK, + BOOST_LOW_FREQ_EN_SHIFT)); +} + +void rockchip_boost_disable_recovery_sw(struct clk_hw *hw) +{ + struct rockchip_clk_pll *pll; + + if (!hw) + return; + pll = to_rockchip_clk_pll(hw); + if (!pll->boost_enabled) + return; + + regmap_write(pll->boost, BOOST_BOOST_CON, + HIWORD_UPDATE(0, BOOST_RECOVERY_MASK, + BOOST_RECOVERY_SHIFT)); + regmap_write(pll->boost, BOOST_BOOST_CON, + HIWORD_UPDATE(0, BOOST_SW_CTRL_MASK, + BOOST_SW_CTRL_SHIFT)); +} + +void rockchip_boost_add_core_div(struct clk_hw *hw, unsigned long prate) +{ + struct rockchip_clk_pll *pll; + unsigned int div; + + if (!hw) + return; + pll = to_rockchip_clk_pll(hw); + if (!pll->boost_enabled || pll->boost_backup_pll_rate == prate) + return; + + /* todo */ + if (pll->boost_backup_pll_usage == BOOST_BACKUP_PLL_USAGE_TARGET) + return; + /* + * cpu clock rate should be less than or equal to + * low rate when change pll rate in boost module + */ + if (pll->boost_low_rate && prate > pll->boost_low_rate) { + div = DIV_ROUND_UP(prate, pll->boost_low_rate) - 1; + regmap_write(pll->boost, BOOST_CLK_CON, + HIWORD_UPDATE(div, BOOST_CORE_DIV_MASK, + BOOST_CORE_DIV_SHIFT)); + pll->boost_backup_pll_rate = prate; + } +} + +#ifdef CONFIG_DEBUG_FS +#include + +#ifndef MODULE +static int boost_summary_show(struct seq_file *s, void *data) +{ + struct rockchip_clk_pll *pll = (struct rockchip_clk_pll *)s->private; + u32 boost_count = 0; + u32 freq_cnt0 = 0, freq_cnt1 = 0; + u64 freq_cnt = 0, high_freq_time = 0; + u32 short_count = 0, short_threshold = 0; + u32 interval_time = 0; + + seq_puts(s, " device boost_count 
high_freq_count high_freq_time short_count short_threshold interval_count\n"); + seq_puts(s, "------------------------------------------------------------------------------------------------------\n"); + seq_printf(s, " %s\n", clk_hw_get_name(&pll->hw)); + + regmap_read(pll->boost, BOOST_SWITCH_CNT, &boost_count); + + regmap_read(pll->boost, BOOST_HIGH_PERF_CNT0, &freq_cnt0); + regmap_read(pll->boost, BOOST_HIGH_PERF_CNT1, &freq_cnt1); + freq_cnt = ((u64)freq_cnt1 << 32) + (u64)freq_cnt0; + high_freq_time = freq_cnt; + do_div(high_freq_time, 24); + + regmap_read(pll->boost, BOOST_SHORT_SWITCH_CNT, &short_count); + regmap_read(pll->boost, BOOST_STATIS_THRESHOLD, &short_threshold); + regmap_read(pll->boost, BOOST_SWITCH_THRESHOLD, &interval_time); + + seq_printf(s, "%22u %17llu %15llu %12u %16u %15u\n", + boost_count, freq_cnt, high_freq_time, short_count, + short_threshold, interval_time); + + return 0; +} + +static int boost_summary_open(struct inode *inode, struct file *file) +{ + return single_open(file, boost_summary_show, inode->i_private); +} + +static const struct file_operations boost_summary_fops = { + .open = boost_summary_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int boost_config_show(struct seq_file *s, void *data) +{ + struct rockchip_clk_pll *pll = (struct rockchip_clk_pll *)s->private; + + seq_printf(s, "boost_enabled: %d\n", pll->boost_enabled); + seq_printf(s, "boost_low_rate: %lu\n", pll->boost_low_rate); + seq_printf(s, "boost_high_rate: %lu\n", pll->boost_high_rate); + + return 0; +} + +static int boost_config_open(struct inode *inode, struct file *file) +{ + return single_open(file, boost_config_show, inode->i_private); +} + +static const struct file_operations boost_config_fops = { + .open = boost_config_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int boost_debug_create_one(struct rockchip_clk_pll *pll, + struct dentry *rootdir) +{ + struct dentry *pdentry, *d; + + pdentry = debugfs_lookup(clk_hw_get_name(&pll->hw), rootdir); + if (!pdentry) { + pr_err("%s: failed to lookup %s dentry\n", __func__, + clk_hw_get_name(&pll->hw)); + return -ENOMEM; + } + + d = debugfs_create_file("boost_summary", 0444, pdentry, + pll, &boost_summary_fops); + if (!d) { + pr_err("%s: failed to create boost_summary file\n", __func__); + return -ENOMEM; + } + + d = debugfs_create_file("boost_config", 0444, pdentry, + pll, &boost_config_fops); + if (!d) { + pr_err("%s: failed to create boost config file\n", __func__); + return -ENOMEM; + } + + return 0; +} + +static int __init boost_debug_init(void) +{ + struct rockchip_clk_pll *pll; + struct dentry *rootdir; + + rootdir = debugfs_lookup("clk", NULL); + if (!rootdir) { + pr_err("%s: failed to lookup clk dentry\n", __func__); + return -ENOMEM; + } + + mutex_lock(&clk_boost_lock); + + hlist_for_each_entry(pll, &clk_boost_list, debug_node) + boost_debug_create_one(pll, rootdir); + + mutex_unlock(&clk_boost_lock); + + return 0; +} +late_initcall(boost_debug_init); +#endif /* MODULE */ +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c index 7df2f1e00347..d5071884b3d5 100644 --- a/drivers/clk/rockchip/clk-rk3399.c +++ b/drivers/clk/rockchip/clk-rk3399.c @@ -15,6 +15,12 @@ #include #include "clk.h" +#define RK3399_I2S_FRAC_MAX_PRATE 800000000 +#define RK3399_UART_FRAC_MAX_PRATE 800000000 +#define RK3399_SPDIF_FRAC_MAX_PRATE 600000000 +#define RK3399_VOP_FRAC_MAX_PRATE 600000000 +#define 
RK3399_WIFI_FRAC_MAX_PRATE 600000000 + enum rk3399_plls { lpll, bpll, dpll, cpll, gpll, npll, vpll, }; @@ -105,25 +111,95 @@ static struct rockchip_pll_rate_table rk3399_pll_rates[] = { { /* sentinel */ }, }; +static struct rockchip_pll_rate_table rk3399_vpll_rates[] = { + /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ + RK3036_PLL_RATE( 594000000, 1, 123, 5, 1, 0, 12582912), /* vco = 2970000000 */ + RK3036_PLL_RATE( 593406593, 1, 123, 5, 1, 0, 10508804), /* vco = 2967032965 */ + RK3036_PLL_RATE( 297000000, 1, 123, 5, 2, 0, 12582912), /* vco = 2970000000 */ + RK3036_PLL_RATE( 296703297, 1, 123, 5, 2, 0, 10508807), /* vco = 2967032970 */ + RK3036_PLL_RATE( 148500000, 1, 129, 7, 3, 0, 15728640), /* vco = 3118500000 */ + RK3036_PLL_RATE( 148351648, 1, 123, 5, 4, 0, 10508800), /* vco = 2967032960 */ + RK3036_PLL_RATE( 106500000, 1, 124, 7, 4, 0, 4194304), /* vco = 2982000000 */ + RK3036_PLL_RATE( 74250000, 1, 129, 7, 6, 0, 15728640), /* vco = 3118500000 */ + RK3036_PLL_RATE( 74175824, 1, 129, 7, 6, 0, 13550823), /* vco = 3115384608 */ + RK3036_PLL_RATE( 65000000, 1, 113, 7, 6, 0, 12582912), /* vco = 2730000000 */ + RK3036_PLL_RATE( 59340659, 1, 121, 7, 7, 0, 2581098), /* vco = 2907692291 */ + RK3036_PLL_RATE( 54000000, 1, 110, 7, 7, 0, 4194304), /* vco = 2646000000 */ + RK3036_PLL_RATE( 27000000, 1, 55, 7, 7, 0, 2097152), /* vco = 1323000000 */ + RK3036_PLL_RATE( 26973027, 1, 55, 7, 7, 0, 1173232), /* vco = 1321678323 */ + { /* sentinel */ }, +}; + /* CRU parents */ PNAME(mux_pll_p) = { "xin24m", "xin32k" }; -PNAME(mux_armclkl_p) = { "clk_core_l_lpll_src", - "clk_core_l_bpll_src", - "clk_core_l_dpll_src", - "clk_core_l_gpll_src" }; -PNAME(mux_armclkb_p) = { "clk_core_b_lpll_src", - "clk_core_b_bpll_src", - "clk_core_b_dpll_src", - "clk_core_b_gpll_src" }; PNAME(mux_ddrclk_p) = { "clk_ddrc_lpll_src", "clk_ddrc_bpll_src", "clk_ddrc_dpll_src", "clk_ddrc_gpll_src" }; + +PNAME(mux_pll_src_vpll_cpll_gpll_p) = { "vpll", "cpll", "gpll" }; +PNAME(mux_pll_src_dmyvpll_cpll_gpll_p) = { "dummy_vpll", "cpll", "gpll" }; + +#ifdef RK3399_TWO_PLL_FOR_VOP +PNAME(mux_aclk_cci_p) = { "dummy_cpll", + "gpll_aclk_cci_src", + "npll_aclk_cci_src", + "dummy_vpll" }; +PNAME(mux_cci_trace_p) = { "dummy_cpll", + "gpll_cci_trace" }; +PNAME(mux_cs_p) = { "dummy_cpll", "gpll_cs", + "npll_cs"}; +PNAME(mux_aclk_perihp_p) = { "dummy_cpll", + "gpll_aclk_perihp_src" }; + +PNAME(mux_pll_src_cpll_gpll_p) = { "dummy_cpll", "gpll" }; +PNAME(mux_pll_src_cpll_gpll_npll_p) = { "dummy_cpll", "gpll", "npll" }; +PNAME(mux_pll_src_cpll_gpll_ppll_p) = { "dummy_cpll", "gpll", "ppll" }; +PNAME(mux_pll_src_cpll_gpll_upll_p) = { "dummy_cpll", "gpll", "upll" }; +PNAME(mux_pll_src_npll_cpll_gpll_p) = { "npll", "dummy_cpll", "gpll" }; +PNAME(mux_pll_src_cpll_gpll_npll_ppll_p) = { "dummy_cpll", "gpll", "npll", + "ppll" }; +PNAME(mux_pll_src_cpll_gpll_npll_24m_p) = { "dummy_cpll", "gpll", "npll", + "xin24m" }; +PNAME(mux_pll_src_cpll_gpll_npll_usbphy480m_p) = { "dummy_cpll", "gpll", "npll", + "clk_usbphy_480m" }; +PNAME(mux_pll_src_ppll_cpll_gpll_npll_p) = { "ppll", "dummy_cpll", "gpll", + "npll", "upll" }; +PNAME(mux_pll_src_cpll_gpll_npll_upll_24m_p) = { "dummy_cpll", "gpll", "npll", + "upll", "xin24m" }; +PNAME(mux_pll_src_cpll_gpll_npll_ppll_upll_24m_p) = { "dummy_cpll", "gpll", "npll", + "ppll", "upll", "xin24m" }; +/* + * We hope to be able to HDMI/DP can obtain better signal quality, + * therefore, we move VOP pwm and aclk clocks to other PLLs, let + * HDMI/DP phyclock can monopolize VPLL. 
+ */ +PNAME(mux_pll_src_dmyvpll_cpll_gpll_npll_p) = { "dummy_vpll", "dummy_cpll", "gpll", + "npll" }; +PNAME(mux_pll_src_dmyvpll_cpll_gpll_gpll_p) = { "dummy_vpll", "dummy_cpll", "gpll", + "gpll" }; +PNAME(mux_pll_src_24m_32k_cpll_gpll_p) = { "xin24m", "xin32k", + "dummy_cpll", "gpll" }; + +PNAME(mux_aclk_emmc_p) = { "dummy_cpll", + "gpll_aclk_emmc_src" }; + +PNAME(mux_aclk_perilp0_p) = { "dummy_cpll", + "gpll_aclk_perilp0_src" }; + +PNAME(mux_fclk_cm0s_p) = { "dummy_cpll", + "gpll_fclk_cm0s_src" }; + +PNAME(mux_hclk_perilp1_p) = { "dummy_cpll", + "gpll_hclk_perilp1_src" }; +PNAME(mux_aclk_gmac_p) = { "dummy_cpll", + "gpll_aclk_gmac_src" }; +#else PNAME(mux_aclk_cci_p) = { "cpll_aclk_cci_src", "gpll_aclk_cci_src", "npll_aclk_cci_src", - "vpll_aclk_cci_src" }; + "dummy_vpll" }; PNAME(mux_cci_trace_p) = { "cpll_cci_trace", "gpll_cci_trace" }; PNAME(mux_cs_p) = { "cpll_cs", "gpll_cs", @@ -148,26 +224,17 @@ PNAME(mux_pll_src_cpll_gpll_npll_upll_24m_p) = { "cpll", "gpll", "npll", "upll", "xin24m" }; PNAME(mux_pll_src_cpll_gpll_npll_ppll_upll_24m_p) = { "cpll", "gpll", "npll", "ppll", "upll", "xin24m" }; - -PNAME(mux_pll_src_vpll_cpll_gpll_p) = { "vpll", "cpll", "gpll" }; -PNAME(mux_pll_src_vpll_cpll_gpll_npll_p) = { "vpll", "cpll", "gpll", +/* + * We hope to be able to HDMI/DP can obtain better signal quality, + * therefore, we move VOP pwm and aclk clocks to other PLLs, let + * HDMI/DP phyclock can monopolize VPLL. + */ +PNAME(mux_pll_src_dmyvpll_cpll_gpll_npll_p) = { "dummy_vpll", "cpll", "gpll", "npll" }; -PNAME(mux_pll_src_vpll_cpll_gpll_24m_p) = { "vpll", "cpll", "gpll", - "xin24m" }; - -PNAME(mux_dclk_vop0_p) = { "dclk_vop0_div", - "dclk_vop0_frac" }; -PNAME(mux_dclk_vop1_p) = { "dclk_vop1_div", - "dclk_vop1_frac" }; - -PNAME(mux_clk_cif_p) = { "clk_cifout_src", "xin24m" }; - -PNAME(mux_pll_src_24m_usbphy480m_p) = { "xin24m", "clk_usbphy_480m" }; -PNAME(mux_pll_src_24m_pciephy_p) = { "xin24m", "clk_pciephy_ref100m" }; +PNAME(mux_pll_src_dmyvpll_cpll_gpll_gpll_p) = { "dummy_vpll", "cpll", "gpll", + "gpll" }; PNAME(mux_pll_src_24m_32k_cpll_gpll_p) = { "xin24m", "xin32k", "cpll", "gpll" }; -PNAME(mux_pciecore_cru_phy_p) = { "clk_pcie_core_cru", - "clk_pcie_core_phy" }; PNAME(mux_aclk_emmc_p) = { "cpll_aclk_emmc_src", "gpll_aclk_emmc_src" }; @@ -180,14 +247,26 @@ PNAME(mux_fclk_cm0s_p) = { "cpll_fclk_cm0s_src", PNAME(mux_hclk_perilp1_p) = { "cpll_hclk_perilp1_src", "gpll_hclk_perilp1_src" }; +PNAME(mux_aclk_gmac_p) = { "cpll_aclk_gmac_src", + "gpll_aclk_gmac_src" }; +#endif + +PNAME(mux_dclk_vop0_p) = { "dclk_vop0_div", + "dummy_dclk_vop0_frac" }; +PNAME(mux_dclk_vop1_p) = { "dclk_vop1_div", + "dummy_dclk_vop1_frac" }; + +PNAME(mux_clk_cif_p) = { "clk_cifout_src", "xin24m" }; +PNAME(mux_pll_src_24m_usbphy480m_p) = { "xin24m", "clk_usbphy_480m" }; +PNAME(mux_pll_src_24m_pciephy_p) = { "xin24m", "clk_pciephy_ref100m" }; +PNAME(mux_pciecore_cru_phy_p) = { "clk_pcie_core_cru", + "clk_pcie_core_phy" }; PNAME(mux_clk_testout1_p) = { "clk_testout1_pll_src", "xin24m" }; PNAME(mux_clk_testout2_p) = { "clk_testout2_pll_src", "xin24m" }; PNAME(mux_usbphy_480m_p) = { "clk_usbphy0_480m_src", "clk_usbphy1_480m_src" }; -PNAME(mux_aclk_gmac_p) = { "cpll_aclk_gmac_src", - "gpll_aclk_gmac_src" }; PNAME(mux_rmii_p) = { "clk_gmac", "clkin_gmac" }; PNAME(mux_spdif_p) = { "clk_spdif_div", "clk_spdif_frac", "clkin_i2s", "xin12m" }; @@ -201,20 +280,22 @@ PNAME(mux_i2sch_p) = { "clk_i2s0", "clk_i2s1", "clk_i2s2" }; PNAME(mux_i2sout_p) = { "clk_i2sout_src", "xin12m" }; -PNAME(mux_uart0_p) = { "clk_uart0_div", 
"clk_uart0_frac", "xin24m" }; -PNAME(mux_uart1_p) = { "clk_uart1_div", "clk_uart1_frac", "xin24m" }; -PNAME(mux_uart2_p) = { "clk_uart2_div", "clk_uart2_frac", "xin24m" }; -PNAME(mux_uart3_p) = { "clk_uart3_div", "clk_uart3_frac", "xin24m" }; +PNAME(mux_uart0_p) = { "xin24m", "clk_uart0_div", "clk_uart0_frac" }; +PNAME(mux_uart1_p) = { "xin24m", "clk_uart1_div", "clk_uart1_frac" }; +PNAME(mux_uart2_p) = { "xin24m", "clk_uart2_div", "clk_uart2_frac" }; +PNAME(mux_uart3_p) = { "xin24m", "clk_uart3_div", "clk_uart3_frac" }; /* PMU CRU parents */ PNAME(mux_ppll_24m_p) = { "ppll", "xin24m" }; PNAME(mux_24m_ppll_p) = { "xin24m", "ppll" }; PNAME(mux_fclk_cm0s_pmu_ppll_p) = { "fclk_cm0s_pmu_ppll_src", "xin24m" }; PNAME(mux_wifi_pmu_p) = { "clk_wifi_div", "clk_wifi_frac" }; -PNAME(mux_uart4_pmu_p) = { "clk_uart4_div", "clk_uart4_frac", - "xin24m" }; +PNAME(mux_uart4_pmu_p) = { "xin24m", "clk_uart4_div", + "clk_uart4_frac" }; PNAME(mux_clk_testout2_2io_p) = { "clk_testout2", "clk_32k_suspend_pmu" }; +static u32 uart_mux_idx[] = { 2, 0, 1 }; + static struct rockchip_pll_clock rk3399_pll_clks[] __initdata = { [lpll] = PLL(pll_rk3399, PLL_APLLL, "lpll", mux_pll_p, 0, RK3399_PLL_CON(0), RK3399_PLL_CON(3), 8, 31, 0, rk3399_pll_rates), @@ -222,18 +303,23 @@ static struct rockchip_pll_clock rk3399_pll_clks[] __initdata = { RK3399_PLL_CON(11), 8, 31, 0, rk3399_pll_rates), [dpll] = PLL(pll_rk3399, PLL_DPLL, "dpll", mux_pll_p, 0, RK3399_PLL_CON(16), RK3399_PLL_CON(19), 8, 31, 0, NULL), +#ifdef RK3399_TWO_PLL_FOR_VOP + [cpll] = PLL(pll_rk3399, PLL_CPLL, "cpll", mux_pll_p, 0, RK3399_PLL_CON(24), + RK3399_PLL_CON(27), 8, 31, 0, rk3399_pll_rates), +#else [cpll] = PLL(pll_rk3399, PLL_CPLL, "cpll", mux_pll_p, 0, RK3399_PLL_CON(24), RK3399_PLL_CON(27), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk3399_pll_rates), +#endif [gpll] = PLL(pll_rk3399, PLL_GPLL, "gpll", mux_pll_p, 0, RK3399_PLL_CON(32), - RK3399_PLL_CON(35), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk3399_pll_rates), + RK3399_PLL_CON(35), 8, 31, 0, rk3399_pll_rates), [npll] = PLL(pll_rk3399, PLL_NPLL, "npll", mux_pll_p, 0, RK3399_PLL_CON(40), RK3399_PLL_CON(43), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk3399_pll_rates), [vpll] = PLL(pll_rk3399, PLL_VPLL, "vpll", mux_pll_p, 0, RK3399_PLL_CON(48), - RK3399_PLL_CON(51), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk3399_pll_rates), + RK3399_PLL_CON(51), 8, 31, 0, rk3399_vpll_rates), }; static struct rockchip_pll_clock rk3399_pmu_pll_clks[] __initdata = { - [ppll] = PLL(pll_rk3399, PLL_PPLL, "ppll", mux_pll_p, 0, RK3399_PMU_PLL_CON(0), + [ppll] = PLL(pll_rk3399, PLL_PPLL, "ppll", mux_pll_p, CLK_IS_CRITICAL, RK3399_PMU_PLL_CON(0), RK3399_PMU_PLL_CON(3), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk3399_pll_rates), }; @@ -259,24 +345,24 @@ static struct rockchip_clk_branch rk3399_i2s2_fracmux __initdata = RK3399_CLKSEL_CON(30), 8, 2, MFLAGS); static struct rockchip_clk_branch rk3399_uart0_fracmux __initdata = - MUX(SCLK_UART0, "clk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, - RK3399_CLKSEL_CON(33), 8, 2, MFLAGS); + MUXTBL(SCLK_UART0, "clk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, + RK3399_CLKSEL_CON(33), 8, 2, MFLAGS, uart_mux_idx); static struct rockchip_clk_branch rk3399_uart1_fracmux __initdata = - MUX(SCLK_UART1, "clk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, - RK3399_CLKSEL_CON(34), 8, 2, MFLAGS); + MUXTBL(SCLK_UART1, "clk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, + RK3399_CLKSEL_CON(34), 8, 2, MFLAGS, uart_mux_idx); static struct rockchip_clk_branch rk3399_uart2_fracmux __initdata = - MUX(SCLK_UART2, "clk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT, - 
RK3399_CLKSEL_CON(35), 8, 2, MFLAGS); + MUXTBL(SCLK_UART2, "clk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT, + RK3399_CLKSEL_CON(35), 8, 2, MFLAGS, uart_mux_idx); static struct rockchip_clk_branch rk3399_uart3_fracmux __initdata = - MUX(SCLK_UART3, "clk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT, - RK3399_CLKSEL_CON(36), 8, 2, MFLAGS); + MUXTBL(SCLK_UART3, "clk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT, + RK3399_CLKSEL_CON(36), 8, 2, MFLAGS, uart_mux_idx); static struct rockchip_clk_branch rk3399_uart4_pmu_fracmux __initdata = - MUX(SCLK_UART4_PMU, "clk_uart4_pmu", mux_uart4_pmu_p, CLK_SET_RATE_PARENT, - RK3399_PMU_CLKSEL_CON(5), 8, 2, MFLAGS); + MUXTBL(SCLK_UART4_PMU, "clk_uart4_pmu", mux_uart4_pmu_p, CLK_SET_RATE_PARENT, + RK3399_PMU_CLKSEL_CON(5), 8, 2, MFLAGS, uart_mux_idx); static struct rockchip_clk_branch rk3399_dclk_vop0_fracmux __initdata = MUX(DCLK_VOP0, "dclk_vop0", mux_dclk_vop0_p, CLK_SET_RATE_PARENT, @@ -291,9 +377,10 @@ static struct rockchip_clk_branch rk3399_pmuclk_wifi_fracmux __initdata = RK3399_PMU_CLKSEL_CON(1), 14, 1, MFLAGS); static const struct rockchip_cpuclk_reg_data rk3399_cpuclkl_data = { - .core_reg = RK3399_CLKSEL_CON(0), - .div_core_shift = 0, - .div_core_mask = 0x1f, + .core_reg[0] = RK3399_CLKSEL_CON(0), + .div_core_shift[0] = 0, + .div_core_mask[0] = 0x1f, + .num_cores = 1, .mux_core_alt = 3, .mux_core_main = 0, .mux_core_shift = 6, @@ -301,9 +388,10 @@ static const struct rockchip_cpuclk_reg_data rk3399_cpuclkl_data = { }; static const struct rockchip_cpuclk_reg_data rk3399_cpuclkb_data = { - .core_reg = RK3399_CLKSEL_CON(2), - .div_core_shift = 0, - .div_core_mask = 0x1f, + .core_reg[0] = RK3399_CLKSEL_CON(2), + .div_core_shift[0] = 0, + .div_core_mask[0] = 0x1f, + .num_cores = 1, .mux_core_alt = 3, .mux_core_main = 1, .mux_core_shift = 6, @@ -406,9 +494,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { GATE(SCLK_USB2PHY1_REF, "clk_usb2phy1_ref", "xin24m", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(6), 6, GFLAGS), - GATE(0, "clk_usbphy0_480m_src", "clk_usbphy0_480m", 0, + GATE(SCLK_USBPHY0_480M_SRC, "clk_usbphy0_480m_src", "clk_usbphy0_480m", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(13), 12, GFLAGS), - GATE(0, "clk_usbphy1_480m_src", "clk_usbphy1_480m", 0, + GATE(SCLK_USBPHY1_480M_SRC, "clk_usbphy1_480m_src", "clk_usbphy1_480m", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(13), 12, GFLAGS), MUX(0, "clk_usbphy_480m", mux_usbphy_480m_p, 0, RK3399_CLKSEL_CON(14), 6, 1, MFLAGS), @@ -423,7 +511,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { COMPOSITE(ACLK_USB3, "aclk_usb3", mux_pll_src_cpll_gpll_npll_p, 0, RK3399_CLKSEL_CON(39), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3399_CLKGATE_CON(12), 0, GFLAGS), - GATE(ACLK_USB3_NOC, "aclk_usb3_noc", "aclk_usb3", CLK_IGNORE_UNUSED, + GATE(ACLK_USB3_NOC, "aclk_usb3_noc", "aclk_usb3", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(30), 0, GFLAGS), GATE(ACLK_USB3OTG0, "aclk_usb3otg0", "aclk_usb3", 0, RK3399_CLKGATE_CON(30), 1, GFLAGS), @@ -549,7 +637,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { GATE(ACLK_GMAC, "aclk_gmac", "aclk_gmac_pre", 0, RK3399_CLKGATE_CON(32), 0, GFLAGS), - GATE(ACLK_GMAC_NOC, "aclk_gmac_noc", "aclk_gmac_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_GMAC_NOC, "aclk_gmac_noc", "aclk_gmac_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(32), 1, GFLAGS), GATE(ACLK_PERF_GMAC, "aclk_perf_gmac", "aclk_gmac_pre", 0, RK3399_CLKGATE_CON(32), 4, GFLAGS), @@ -559,7 +647,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(6), 11, GFLAGS), 
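The MUXTBL() conversions above exist because the reworked UART parent lists now put "xin24m" at software index 0 while the hardware select field still encodes xin24m as value 2; uart_mux_idx[] = { 2, 0, 1 } translates the parent index into the register field value. Below is a minimal sketch of the same idea using the common clock framework's clk_register_mux_table(); the clock name, register offset, shift and lock are hypothetical placeholders, not values from this patch.

/*
 * Sketch only: a 2-bit mux whose software parent order ("xin24m" first)
 * does not match the hardware encoding. table[i] is the register field
 * value written when parent index i is selected, mirroring uart_mux_idx.
 * The register offset, shift and lock below are hypothetical.
 */
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_mux_lock);

static const char *const example_uart_parents[] = {
	"xin24m", "clk_uart0_div", "clk_uart0_frac",
};

/* parent index 0/1/2 -> register value 2/0/1 */
static u32 example_uart_mux_idx[] = { 2, 0, 1 };

static struct clk *example_register_uart_mux(void __iomem *reg_base)
{
	return clk_register_mux_table(NULL, "clk_uart0_example",
				      example_uart_parents,
				      ARRAY_SIZE(example_uart_parents),
				      CLK_SET_RATE_PARENT,
				      reg_base + 0x84,	/* hypothetical CLKSEL register */
				      8,		/* select field shift */
				      0x3,		/* 2-bit field mask */
				      0,		/* clk_mux_flags */
				      example_uart_mux_idx,
				      &example_mux_lock);
}

With the table in place, selecting xin24m as parent writes 2 into the select field even though xin24m is parent index 0, which is the behaviour the MUXTBL() branches and the clk_register_mux_table() path added to clk.c later in this patch depend on.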
GATE(PCLK_GMAC, "pclk_gmac", "pclk_gmac_pre", 0, RK3399_CLKGATE_CON(32), 2, GFLAGS), - GATE(PCLK_GMAC_NOC, "pclk_gmac_noc", "pclk_gmac_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_GMAC_NOC, "pclk_gmac_noc", "pclk_gmac_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(32), 3, GFLAGS), COMPOSITE(SCLK_MAC, "clk_gmac", mux_pll_src_cpll_gpll_npll_p, 0, @@ -578,13 +666,13 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(5), 9, GFLAGS), /* spdif */ - COMPOSITE(0, "clk_spdif_div", mux_pll_src_cpll_gpll_p, 0, + COMPOSITE(SCLK_SPDIF_DIV, "clk_spdif_div", mux_pll_src_cpll_gpll_p, 0, RK3399_CLKSEL_CON(32), 7, 1, MFLAGS, 0, 7, DFLAGS, RK3399_CLKGATE_CON(8), 13, GFLAGS), - COMPOSITE_FRACMUX(0, "clk_spdif_frac", "clk_spdif_div", 0, + COMPOSITE_FRACMUX(0, "clk_spdif_frac", "clk_spdif_div", CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(99), 0, RK3399_CLKGATE_CON(8), 14, GFLAGS, - &rk3399_spdif_fracmux), + &rk3399_spdif_fracmux, RK3399_SPDIF_FRAC_MAX_PRATE), GATE(SCLK_SPDIF_8CH, "clk_spdif", "clk_spdif_mux", CLK_SET_RATE_PARENT, RK3399_CLKGATE_CON(8), 15, GFLAGS), @@ -592,84 +680,84 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKSEL_CON(32), 15, 1, MFLAGS, 8, 5, DFLAGS, RK3399_CLKGATE_CON(10), 6, GFLAGS), /* i2s */ - COMPOSITE(0, "clk_i2s0_div", mux_pll_src_cpll_gpll_p, 0, + COMPOSITE(SCLK_I2S0_DIV, "clk_i2s0_div", mux_pll_src_cpll_gpll_p, 0, RK3399_CLKSEL_CON(28), 7, 1, MFLAGS, 0, 7, DFLAGS, RK3399_CLKGATE_CON(8), 3, GFLAGS), - COMPOSITE_FRACMUX(0, "clk_i2s0_frac", "clk_i2s0_div", 0, + COMPOSITE_FRACMUX(0, "clk_i2s0_frac", "clk_i2s0_div", CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(96), 0, RK3399_CLKGATE_CON(8), 4, GFLAGS, - &rk3399_i2s0_fracmux), + &rk3399_i2s0_fracmux, RK3399_I2S_FRAC_MAX_PRATE), GATE(SCLK_I2S0_8CH, "clk_i2s0", "clk_i2s0_mux", CLK_SET_RATE_PARENT, RK3399_CLKGATE_CON(8), 5, GFLAGS), - COMPOSITE(0, "clk_i2s1_div", mux_pll_src_cpll_gpll_p, 0, + COMPOSITE(SCLK_I2S1_DIV, "clk_i2s1_div", mux_pll_src_cpll_gpll_p, 0, RK3399_CLKSEL_CON(29), 7, 1, MFLAGS, 0, 7, DFLAGS, RK3399_CLKGATE_CON(8), 6, GFLAGS), - COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_div", 0, + COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_div", CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(97), 0, RK3399_CLKGATE_CON(8), 7, GFLAGS, - &rk3399_i2s1_fracmux), + &rk3399_i2s1_fracmux, RK3399_I2S_FRAC_MAX_PRATE), GATE(SCLK_I2S1_8CH, "clk_i2s1", "clk_i2s1_mux", CLK_SET_RATE_PARENT, RK3399_CLKGATE_CON(8), 8, GFLAGS), - COMPOSITE(0, "clk_i2s2_div", mux_pll_src_cpll_gpll_p, 0, + COMPOSITE(SCLK_I2S2_DIV, "clk_i2s2_div", mux_pll_src_cpll_gpll_p, 0, RK3399_CLKSEL_CON(30), 7, 1, MFLAGS, 0, 7, DFLAGS, RK3399_CLKGATE_CON(8), 9, GFLAGS), - COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_div", 0, + COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_div", CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(98), 0, RK3399_CLKGATE_CON(8), 10, GFLAGS, - &rk3399_i2s2_fracmux), + &rk3399_i2s2_fracmux, RK3399_I2S_FRAC_MAX_PRATE), GATE(SCLK_I2S2_8CH, "clk_i2s2", "clk_i2s2_mux", CLK_SET_RATE_PARENT, RK3399_CLKGATE_CON(8), 11, GFLAGS), - MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT, + MUX(SCLK_I2SOUT_SRC, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(31), 0, 2, MFLAGS), COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(31), 2, 1, MFLAGS, RK3399_CLKGATE_CON(8), 12, GFLAGS), /* uart */ - MUX(0, "clk_uart0_src", mux_pll_src_cpll_gpll_upll_p, 0, + MUX(SCLK_UART0_SRC, "clk_uart0_src", mux_pll_src_cpll_gpll_upll_p, 0, RK3399_CLKSEL_CON(33), 12, 
2, MFLAGS), COMPOSITE_NOMUX(0, "clk_uart0_div", "clk_uart0_src", 0, RK3399_CLKSEL_CON(33), 0, 7, DFLAGS, RK3399_CLKGATE_CON(9), 0, GFLAGS), - COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_div", 0, + COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_div", CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(100), 0, RK3399_CLKGATE_CON(9), 1, GFLAGS, - &rk3399_uart0_fracmux), + &rk3399_uart0_fracmux, RK3399_UART_FRAC_MAX_PRATE), - MUX(0, "clk_uart_src", mux_pll_src_cpll_gpll_p, 0, + MUX(SCLK_UART_SRC, "clk_uart_src", mux_pll_src_cpll_gpll_p, 0, RK3399_CLKSEL_CON(33), 15, 1, MFLAGS), COMPOSITE_NOMUX(0, "clk_uart1_div", "clk_uart_src", 0, RK3399_CLKSEL_CON(34), 0, 7, DFLAGS, RK3399_CLKGATE_CON(9), 2, GFLAGS), - COMPOSITE_FRACMUX(0, "clk_uart1_frac", "clk_uart1_div", 0, + COMPOSITE_FRACMUX(0, "clk_uart1_frac", "clk_uart1_div", CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(101), 0, RK3399_CLKGATE_CON(9), 3, GFLAGS, - &rk3399_uart1_fracmux), + &rk3399_uart1_fracmux, RK3399_UART_FRAC_MAX_PRATE), COMPOSITE_NOMUX(0, "clk_uart2_div", "clk_uart_src", 0, RK3399_CLKSEL_CON(35), 0, 7, DFLAGS, RK3399_CLKGATE_CON(9), 4, GFLAGS), - COMPOSITE_FRACMUX(0, "clk_uart2_frac", "clk_uart2_div", 0, + COMPOSITE_FRACMUX(0, "clk_uart2_frac", "clk_uart2_div", CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(102), 0, RK3399_CLKGATE_CON(9), 5, GFLAGS, - &rk3399_uart2_fracmux), + &rk3399_uart2_fracmux, RK3399_UART_FRAC_MAX_PRATE), COMPOSITE_NOMUX(0, "clk_uart3_div", "clk_uart_src", 0, RK3399_CLKSEL_CON(36), 0, 7, DFLAGS, RK3399_CLKGATE_CON(9), 6, GFLAGS), - COMPOSITE_FRACMUX(0, "clk_uart3_frac", "clk_uart3_div", 0, + COMPOSITE_FRACMUX(0, "clk_uart3_frac", "clk_uart3_div", CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(103), 0, RK3399_CLKGATE_CON(9), 7, GFLAGS, - &rk3399_uart3_fracmux), + &rk3399_uart3_fracmux, RK3399_UART_FRAC_MAX_PRATE), - COMPOSITE(PCLK_DDR, "pclk_ddr", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED, + COMPOSITE(PCLK_DDR, "pclk_ddr", mux_pll_src_cpll_gpll_p, CLK_IS_CRITICAL, RK3399_CLKSEL_CON(6), 15, 1, MFLAGS, 8, 5, DFLAGS, RK3399_CLKGATE_CON(3), 4, GFLAGS), - GATE(PCLK_CENTER_MAIN_NOC, "pclk_center_main_noc", "pclk_ddr", CLK_IGNORE_UNUSED, + GATE(PCLK_CENTER_MAIN_NOC, "pclk_center_main_noc", "pclk_ddr", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(18), 10, GFLAGS), GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_ddr", 0, RK3399_CLKGATE_CON(18), 12, GFLAGS), @@ -686,30 +774,30 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(3), 6, GFLAGS), /* cci */ - GATE(0, "cpll_aclk_cci_src", "cpll", CLK_IGNORE_UNUSED, + GATE(0, "cpll_aclk_cci_src", "cpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(2), 0, GFLAGS), - GATE(0, "gpll_aclk_cci_src", "gpll", CLK_IGNORE_UNUSED, + GATE(0, "gpll_aclk_cci_src", "gpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(2), 1, GFLAGS), - GATE(0, "npll_aclk_cci_src", "npll", CLK_IGNORE_UNUSED, + GATE(0, "npll_aclk_cci_src", "npll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(2), 2, GFLAGS), - GATE(0, "vpll_aclk_cci_src", "vpll", CLK_IGNORE_UNUSED, + GATE(0, "vpll_aclk_cci_src", "vpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(2), 3, GFLAGS), - COMPOSITE(0, "aclk_cci_pre", mux_aclk_cci_p, CLK_IGNORE_UNUSED, + COMPOSITE(0, "aclk_cci_pre", mux_aclk_cci_p, CLK_IS_CRITICAL, RK3399_CLKSEL_CON(5), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3399_CLKGATE_CON(2), 4, GFLAGS), - GATE(ACLK_ADB400M_PD_CORE_L, "aclk_adb400m_pd_core_l", "aclk_cci_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_ADB400M_PD_CORE_L, "aclk_adb400m_pd_core_l", "aclk_cci_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(15), 0, GFLAGS), - GATE(ACLK_ADB400M_PD_CORE_B, 
"aclk_adb400m_pd_core_b", "aclk_cci_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_ADB400M_PD_CORE_B, "aclk_adb400m_pd_core_b", "aclk_cci_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(15), 1, GFLAGS), - GATE(ACLK_CCI, "aclk_cci", "aclk_cci_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_CCI, "aclk_cci", "aclk_cci_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(15), 2, GFLAGS), - GATE(ACLK_CCI_NOC0, "aclk_cci_noc0", "aclk_cci_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_CCI_NOC0, "aclk_cci_noc0", "aclk_cci_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(15), 3, GFLAGS), - GATE(ACLK_CCI_NOC1, "aclk_cci_noc1", "aclk_cci_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_CCI_NOC1, "aclk_cci_noc1", "aclk_cci_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(15), 4, GFLAGS), - GATE(ACLK_CCI_GRF, "aclk_cci_grf", "aclk_cci_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_CCI_GRF, "aclk_cci_grf", "aclk_cci_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(15), 7, GFLAGS), GATE(0, "cpll_cci_trace", "cpll", CLK_IGNORE_UNUSED, @@ -717,20 +805,20 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { GATE(0, "gpll_cci_trace", "gpll", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(2), 6, GFLAGS), COMPOSITE(SCLK_CCI_TRACE, "clk_cci_trace", mux_cci_trace_p, CLK_IGNORE_UNUSED, - RK3399_CLKSEL_CON(5), 15, 2, MFLAGS, 8, 5, DFLAGS, + RK3399_CLKSEL_CON(5), 15, 1, MFLAGS, 8, 5, DFLAGS, RK3399_CLKGATE_CON(2), 7, GFLAGS), - GATE(0, "cpll_cs", "cpll", CLK_IGNORE_UNUSED, + GATE(0, "cpll_cs", "cpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(2), 8, GFLAGS), - GATE(0, "gpll_cs", "gpll", CLK_IGNORE_UNUSED, + GATE(0, "gpll_cs", "gpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(2), 9, GFLAGS), - GATE(0, "npll_cs", "npll", CLK_IGNORE_UNUSED, + GATE(0, "npll_cs", "npll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(2), 10, GFLAGS), - COMPOSITE_NOGATE(0, "clk_cs", mux_cs_p, CLK_IGNORE_UNUSED, + COMPOSITE_NOGATE(SCLK_CS, "clk_cs", mux_cs_p, CLK_IS_CRITICAL, RK3399_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 5, DFLAGS), GATE(0, "clk_dbg_cxcs", "clk_cs", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(15), 5, GFLAGS), - GATE(0, "clk_dbg_noc", "clk_cs", CLK_IGNORE_UNUSED, + GATE(0, "clk_dbg_noc", "clk_cs", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(15), 6, GFLAGS), /* vcodec */ @@ -742,12 +830,12 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(4), 1, GFLAGS), GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0, RK3399_CLKGATE_CON(17), 2, GFLAGS), - GATE(0, "hclk_vcodec_noc", "hclk_vcodec_pre", CLK_IGNORE_UNUSED, + GATE(0, "hclk_vcodec_noc", "hclk_vcodec_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(17), 3, GFLAGS), GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0, RK3399_CLKGATE_CON(17), 0, GFLAGS), - GATE(0, "aclk_vcodec_noc", "aclk_vcodec_pre", CLK_IGNORE_UNUSED, + GATE(0, "aclk_vcodec_noc", "aclk_vcodec_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(17), 1, GFLAGS), /* vdu */ @@ -766,12 +854,12 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(4), 3, GFLAGS), GATE(HCLK_VDU, "hclk_vdu", "hclk_vdu_pre", 0, RK3399_CLKGATE_CON(17), 10, GFLAGS), - GATE(HCLK_VDU_NOC, "hclk_vdu_noc", "hclk_vdu_pre", CLK_IGNORE_UNUSED, + GATE(HCLK_VDU_NOC, "hclk_vdu_noc", "hclk_vdu_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(17), 11, GFLAGS), GATE(ACLK_VDU, "aclk_vdu", "aclk_vdu_pre", 0, RK3399_CLKGATE_CON(17), 8, GFLAGS), - GATE(ACLK_VDU_NOC, "aclk_vdu_noc", "aclk_vdu_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_VDU_NOC, "aclk_vdu_noc", "aclk_vdu_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(17), 9, GFLAGS), /* iep */ @@ -783,12 +871,12 @@ static struct rockchip_clk_branch rk3399_clk_branches[] 
__initdata = { RK3399_CLKGATE_CON(4), 7, GFLAGS), GATE(HCLK_IEP, "hclk_iep", "hclk_iep_pre", 0, RK3399_CLKGATE_CON(16), 2, GFLAGS), - GATE(HCLK_IEP_NOC, "hclk_iep_noc", "hclk_iep_pre", CLK_IGNORE_UNUSED, + GATE(HCLK_IEP_NOC, "hclk_iep_noc", "hclk_iep_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(16), 3, GFLAGS), GATE(ACLK_IEP, "aclk_iep", "aclk_iep_pre", 0, RK3399_CLKGATE_CON(16), 0, GFLAGS), - GATE(ACLK_IEP_NOC, "aclk_iep_noc", "aclk_iep_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_IEP_NOC, "aclk_iep_noc", "aclk_iep_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(16), 1, GFLAGS), /* rga */ @@ -804,21 +892,21 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(4), 9, GFLAGS), GATE(HCLK_RGA, "hclk_rga", "hclk_rga_pre", 0, RK3399_CLKGATE_CON(16), 10, GFLAGS), - GATE(HCLK_RGA_NOC, "hclk_rga_noc", "hclk_rga_pre", CLK_IGNORE_UNUSED, + GATE(HCLK_RGA_NOC, "hclk_rga_noc", "hclk_rga_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(16), 11, GFLAGS), GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3399_CLKGATE_CON(16), 8, GFLAGS), - GATE(ACLK_RGA_NOC, "aclk_rga_noc", "aclk_rga_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_RGA_NOC, "aclk_rga_noc", "aclk_rga_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(16), 9, GFLAGS), /* center */ - COMPOSITE(0, "aclk_center", mux_pll_src_cpll_gpll_npll_p, CLK_IGNORE_UNUSED, + COMPOSITE(ACLK_CENTER, "aclk_center", mux_pll_src_cpll_gpll_npll_p, CLK_IS_CRITICAL, RK3399_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 5, DFLAGS, RK3399_CLKGATE_CON(3), 7, GFLAGS), - GATE(ACLK_CENTER_MAIN_NOC, "aclk_center_main_noc", "aclk_center", CLK_IGNORE_UNUSED, + GATE(ACLK_CENTER_MAIN_NOC, "aclk_center_main_noc", "aclk_center", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(19), 0, GFLAGS), - GATE(ACLK_CENTER_PERI_NOC, "aclk_center_peri_noc", "aclk_center", CLK_IGNORE_UNUSED, + GATE(ACLK_CENTER_PERI_NOC, "aclk_center_peri_noc", "aclk_center", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(19), 1, GFLAGS), /* gpu */ @@ -835,25 +923,25 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(13), 1, GFLAGS), /* perihp */ - GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED, + GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(5), 1, GFLAGS), - GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED, + GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(5), 0, GFLAGS), - COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED, + COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IS_CRITICAL, RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS, RK3399_CLKGATE_CON(5), 2, GFLAGS), - COMPOSITE_NOMUX(HCLK_PERIHP, "hclk_perihp", "aclk_perihp", CLK_IGNORE_UNUSED, + COMPOSITE_NOMUX(HCLK_PERIHP, "hclk_perihp", "aclk_perihp", CLK_IS_CRITICAL, RK3399_CLKSEL_CON(14), 8, 2, DFLAGS, RK3399_CLKGATE_CON(5), 3, GFLAGS), - COMPOSITE_NOMUX(PCLK_PERIHP, "pclk_perihp", "aclk_perihp", CLK_IGNORE_UNUSED, - RK3399_CLKSEL_CON(14), 12, 2, DFLAGS, + COMPOSITE_NOMUX(PCLK_PERIHP, "pclk_perihp", "aclk_perihp", CLK_IS_CRITICAL, + RK3399_CLKSEL_CON(14), 12, 3, DFLAGS, RK3399_CLKGATE_CON(5), 4, GFLAGS), GATE(ACLK_PERF_PCIE, "aclk_perf_pcie", "aclk_perihp", 0, RK3399_CLKGATE_CON(20), 2, GFLAGS), GATE(ACLK_PCIE, "aclk_pcie", "aclk_perihp", 0, RK3399_CLKGATE_CON(20), 10, GFLAGS), - GATE(0, "aclk_perihp_noc", "aclk_perihp", CLK_IGNORE_UNUSED, + GATE(0, "aclk_perihp_noc", "aclk_perihp", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(20), 12, GFLAGS), GATE(HCLK_HOST0, "hclk_host0", "hclk_perihp", 0, @@ -866,16 +954,16 @@ static struct 
rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(20), 8, GFLAGS), GATE(HCLK_HSIC, "hclk_hsic", "hclk_perihp", 0, RK3399_CLKGATE_CON(20), 9, GFLAGS), - GATE(0, "hclk_perihp_noc", "hclk_perihp", CLK_IGNORE_UNUSED, + GATE(0, "hclk_perihp_noc", "hclk_perihp", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(20), 13, GFLAGS), GATE(0, "hclk_ahb1tom", "hclk_perihp", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(20), 15, GFLAGS), - GATE(PCLK_PERIHP_GRF, "pclk_perihp_grf", "pclk_perihp", CLK_IGNORE_UNUSED, + GATE(PCLK_PERIHP_GRF, "pclk_perihp_grf", "pclk_perihp", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(20), 4, GFLAGS), GATE(PCLK_PCIE, "pclk_pcie", "pclk_perihp", 0, RK3399_CLKGATE_CON(20), 11, GFLAGS), - GATE(0, "pclk_perihp_noc", "pclk_perihp", CLK_IGNORE_UNUSED, + GATE(0, "pclk_perihp_noc", "pclk_perihp", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(20), 14, GFLAGS), GATE(PCLK_HSICPHY, "pclk_hsicphy", "pclk_perihp", 0, RK3399_CLKGATE_CON(31), 8, GFLAGS), @@ -886,7 +974,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(12), 13, GFLAGS), GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_sd", 0, RK3399_CLKGATE_CON(33), 8, GFLAGS), - GATE(0, "hclk_sdmmc_noc", "hclk_sd", CLK_IGNORE_UNUSED, + GATE(0, "hclk_sdmmc_noc", "hclk_sd", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(33), 9, GFLAGS), COMPOSITE(SCLK_SDIO, "clk_sdio", mux_pll_src_cpll_gpll_npll_ppll_upll_24m_p, 0, @@ -933,23 +1021,23 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKSEL_CON(21), 7, 1, MFLAGS, 0, 5, DFLAGS), GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(32), 8, GFLAGS), - GATE(ACLK_EMMC_NOC, "aclk_emmc_noc", "aclk_emmc", CLK_IGNORE_UNUSED, + GATE(ACLK_EMMC_NOC, "aclk_emmc_noc", "aclk_emmc", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(32), 9, GFLAGS), GATE(ACLK_EMMC_GRF, "aclk_emmcgrf", "aclk_emmc", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(32), 10, GFLAGS), /* perilp0 */ - GATE(0, "cpll_aclk_perilp0_src", "cpll", CLK_IGNORE_UNUSED, + GATE(0, "cpll_aclk_perilp0_src", "cpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(7), 1, GFLAGS), - GATE(0, "gpll_aclk_perilp0_src", "gpll", CLK_IGNORE_UNUSED, + GATE(0, "gpll_aclk_perilp0_src", "gpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(7), 0, GFLAGS), - COMPOSITE(ACLK_PERILP0, "aclk_perilp0", mux_aclk_perilp0_p, CLK_IGNORE_UNUSED, + COMPOSITE(ACLK_PERILP0, "aclk_perilp0", mux_aclk_perilp0_p, CLK_IS_CRITICAL, RK3399_CLKSEL_CON(23), 7, 1, MFLAGS, 0, 5, DFLAGS, RK3399_CLKGATE_CON(7), 2, GFLAGS), - COMPOSITE_NOMUX(HCLK_PERILP0, "hclk_perilp0", "aclk_perilp0", CLK_IGNORE_UNUSED, + COMPOSITE_NOMUX(HCLK_PERILP0, "hclk_perilp0", "aclk_perilp0", CLK_IS_CRITICAL, RK3399_CLKSEL_CON(23), 8, 2, DFLAGS, RK3399_CLKGATE_CON(7), 3, GFLAGS), - COMPOSITE_NOMUX(PCLK_PERILP0, "pclk_perilp0", "aclk_perilp0", 0, + COMPOSITE_NOMUX(PCLK_PERILP0, "pclk_perilp0", "aclk_perilp0", CLK_IS_CRITICAL, RK3399_CLKSEL_CON(23), 12, 3, DFLAGS, RK3399_CLKGATE_CON(7), 4, GFLAGS), @@ -964,8 +1052,8 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { GATE(SCLK_INTMEM5, "clk_intmem5", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 7, GFLAGS), GATE(ACLK_DCF, "aclk_dcf", "aclk_perilp0", 0, RK3399_CLKGATE_CON(23), 8, GFLAGS), GATE(ACLK_DMAC0_PERILP, "aclk_dmac0_perilp", "aclk_perilp0", 0, RK3399_CLKGATE_CON(25), 5, GFLAGS), - GATE(ACLK_DMAC1_PERILP, "aclk_dmac1_perilp", "aclk_perilp0", 0, RK3399_CLKGATE_CON(25), 6, GFLAGS), - GATE(ACLK_PERILP0_NOC, "aclk_perilp0_noc", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 7, GFLAGS), 
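Throughout these hunks the bus, NOC and GRF gates move from CLK_IGNORE_UNUSED to CLK_IS_CRITICAL, and the string-based rk3399_cru_critical_clocks / rockchip_clk_protect_critical() lists are removed further down. The practical difference: CLK_IGNORE_UNUSED only exempts a clock from the late-init clk_disable_unused() sweep, while CLK_IS_CRITICAL makes the framework prepare and enable the clock at registration and keep it enabled. A minimal sketch with hypothetical names, offset and bit index, not taken from this patch:

/*
 * Sketch only: registering a bus/NOC gate that must stay on. With
 * CLK_IS_CRITICAL the common clock framework enables the gate as soon
 * as it is registered; with CLK_IGNORE_UNUSED it would merely be
 * skipped by clk_disable_unused(). All values here are placeholders.
 */
#include <linux/clk-provider.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_gate_lock);

static struct clk *example_register_noc_gate(void __iomem *reg_base)
{
	return clk_register_gate(NULL, "aclk_example_noc", "aclk_example_pre",
				 CLK_IS_CRITICAL,
				 reg_base + 0x300,	/* hypothetical CLKGATE register */
				 7,			/* gate bit index */
				 CLK_GATE_HIWORD_MASK,	/* hiword-mask write style */
				 &example_gate_lock);
}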
+ GATE(ACLK_DMAC1_PERILP, "aclk_dmac1_perilp", "aclk_perilp0", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(25), 6, GFLAGS), + GATE(ACLK_PERILP0_NOC, "aclk_perilp0_noc", "aclk_perilp0", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(25), 7, GFLAGS), /* hclk_perilp0 gates */ GATE(HCLK_ROM, "hclk_rom", "hclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(24), 4, GFLAGS), @@ -973,7 +1061,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { GATE(HCLK_S_CRYPTO0, "hclk_s_crypto0", "hclk_perilp0", 0, RK3399_CLKGATE_CON(24), 6, GFLAGS), GATE(HCLK_M_CRYPTO1, "hclk_m_crypto1", "hclk_perilp0", 0, RK3399_CLKGATE_CON(24), 14, GFLAGS), GATE(HCLK_S_CRYPTO1, "hclk_s_crypto1", "hclk_perilp0", 0, RK3399_CLKGATE_CON(24), 15, GFLAGS), - GATE(HCLK_PERILP0_NOC, "hclk_perilp0_noc", "hclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 8, GFLAGS), + GATE(HCLK_PERILP0_NOC, "hclk_perilp0_noc", "hclk_perilp0", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(25), 8, GFLAGS), /* pclk_perilp0 gates */ GATE(PCLK_DCF, "pclk_dcf", "pclk_perilp0", 0, RK3399_CLKGATE_CON(23), 9, GFLAGS), @@ -1001,29 +1089,29 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { GATE(HCLK_M0_PERILP, "hclk_m0_perilp", "fclk_cm0s", 0, RK3399_CLKGATE_CON(24), 9, GFLAGS), GATE(DCLK_M0_PERILP, "dclk_m0_perilp", "fclk_cm0s", 0, RK3399_CLKGATE_CON(24), 10, GFLAGS), GATE(SCLK_M0_PERILP_DEC, "clk_m0_perilp_dec", "fclk_cm0s", 0, RK3399_CLKGATE_CON(24), 11, GFLAGS), - GATE(HCLK_M0_PERILP_NOC, "hclk_m0_perilp_noc", "fclk_cm0s", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 11, GFLAGS), + GATE(HCLK_M0_PERILP_NOC, "hclk_m0_perilp_noc", "fclk_cm0s", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(25), 11, GFLAGS), /* perilp1 */ - GATE(0, "cpll_hclk_perilp1_src", "cpll", CLK_IGNORE_UNUSED, + GATE(0, "cpll_hclk_perilp1_src", "cpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(8), 1, GFLAGS), - GATE(0, "gpll_hclk_perilp1_src", "gpll", CLK_IGNORE_UNUSED, + GATE(0, "gpll_hclk_perilp1_src", "gpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(8), 0, GFLAGS), - COMPOSITE_NOGATE(HCLK_PERILP1, "hclk_perilp1", mux_hclk_perilp1_p, CLK_IGNORE_UNUSED, + COMPOSITE_NOGATE(HCLK_PERILP1, "hclk_perilp1", mux_hclk_perilp1_p, CLK_IS_CRITICAL, RK3399_CLKSEL_CON(25), 7, 1, MFLAGS, 0, 5, DFLAGS), - COMPOSITE_NOMUX(PCLK_PERILP1, "pclk_perilp1", "hclk_perilp1", CLK_IGNORE_UNUSED, + COMPOSITE_NOMUX(PCLK_PERILP1, "pclk_perilp1", "hclk_perilp1", CLK_IS_CRITICAL, RK3399_CLKSEL_CON(25), 8, 3, DFLAGS, RK3399_CLKGATE_CON(8), 2, GFLAGS), /* hclk_perilp1 gates */ - GATE(0, "hclk_perilp1_noc", "hclk_perilp1", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 9, GFLAGS), - GATE(0, "hclk_sdio_noc", "hclk_perilp1", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 12, GFLAGS), + GATE(0, "hclk_perilp1_noc", "hclk_perilp1", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(25), 9, GFLAGS), + GATE(0, "hclk_sdio_noc", "hclk_perilp1", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(25), 12, GFLAGS), GATE(HCLK_I2S0_8CH, "hclk_i2s0", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 0, GFLAGS), GATE(HCLK_I2S1_8CH, "hclk_i2s1", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 1, GFLAGS), GATE(HCLK_I2S2_8CH, "hclk_i2s2", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 2, GFLAGS), GATE(HCLK_SPDIF, "hclk_spdif", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 3, GFLAGS), GATE(HCLK_SDIO, "hclk_sdio", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 4, GFLAGS), GATE(PCLK_SPI5, "pclk_spi5", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 5, GFLAGS), - GATE(0, "hclk_sdioaudio_noc", "hclk_perilp1", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(34), 6, GFLAGS), + GATE(0, "hclk_sdioaudio_noc", 
"hclk_perilp1", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(34), 6, GFLAGS), /* pclk_perilp1 gates */ GATE(PCLK_UART0, "pclk_uart0", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 0, GFLAGS), @@ -1046,7 +1134,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { GATE(PCLK_SPI2, "pclk_spi2", "pclk_perilp1", 0, RK3399_CLKGATE_CON(23), 12, GFLAGS), GATE(PCLK_SPI4, "pclk_spi4", "pclk_perilp1", 0, RK3399_CLKGATE_CON(23), 13, GFLAGS), GATE(PCLK_PERIHP_GRF, "pclk_perilp_sgrf", "pclk_perilp1", 0, RK3399_CLKGATE_CON(24), 13, GFLAGS), - GATE(0, "pclk_perilp1_noc", "pclk_perilp1", 0, RK3399_CLKGATE_CON(25), 10, GFLAGS), + GATE(0, "pclk_perilp1_noc", "pclk_perilp1", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(25), 10, GFLAGS), /* saradc */ COMPOSITE_NOMUX(SCLK_SARADC, "clk_saradc", "xin24m", 0, @@ -1075,24 +1163,23 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { COMPOSITE(ACLK_VIO, "aclk_vio", mux_pll_src_cpll_gpll_ppll_p, CLK_IGNORE_UNUSED, RK3399_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3399_CLKGATE_CON(11), 0, GFLAGS), - COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", 0, + COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", CLK_IS_CRITICAL, RK3399_CLKSEL_CON(43), 0, 5, DFLAGS, RK3399_CLKGATE_CON(11), 1, GFLAGS), - GATE(ACLK_VIO_NOC, "aclk_vio_noc", "aclk_vio", CLK_IGNORE_UNUSED, + GATE(ACLK_VIO_NOC, "aclk_vio_noc", "aclk_vio", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(29), 0, GFLAGS), GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "pclk_vio", 0, RK3399_CLKGATE_CON(29), 1, GFLAGS), GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "pclk_vio", 0, RK3399_CLKGATE_CON(29), 2, GFLAGS), - GATE(PCLK_VIO_GRF, "pclk_vio_grf", "pclk_vio", CLK_IGNORE_UNUSED, + GATE(PCLK_VIO_GRF, "pclk_vio_grf", "pclk_vio", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(29), 12, GFLAGS), /* hdcp */ - COMPOSITE(ACLK_HDCP, "aclk_hdcp", mux_pll_src_cpll_gpll_ppll_p, 0, - RK3399_CLKSEL_CON(42), 14, 2, MFLAGS, 8, 5, DFLAGS, - RK3399_CLKGATE_CON(11), 12, GFLAGS), + COMPOSITE_NOGATE(ACLK_HDCP, "aclk_hdcp", mux_pll_src_cpll_gpll_ppll_p, 0, + RK3399_CLKSEL_CON(42), 14, 2, MFLAGS, 8, 5, DFLAGS), COMPOSITE_NOMUX(HCLK_HDCP, "hclk_hdcp", "aclk_hdcp", 0, RK3399_CLKSEL_CON(43), 5, 5, DFLAGS, RK3399_CLKGATE_CON(11), 3, GFLAGS), @@ -1100,17 +1187,17 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKSEL_CON(43), 10, 5, DFLAGS, RK3399_CLKGATE_CON(11), 10, GFLAGS), - GATE(ACLK_HDCP_NOC, "aclk_hdcp_noc", "aclk_hdcp", CLK_IGNORE_UNUSED, + GATE(ACLK_HDCP_NOC, "aclk_hdcp_noc", "aclk_hdcp", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(29), 4, GFLAGS), GATE(ACLK_HDCP22, "aclk_hdcp22", "aclk_hdcp", 0, RK3399_CLKGATE_CON(29), 10, GFLAGS), - GATE(HCLK_HDCP_NOC, "hclk_hdcp_noc", "hclk_hdcp", CLK_IGNORE_UNUSED, + GATE(HCLK_HDCP_NOC, "hclk_hdcp_noc", "hclk_hdcp", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(29), 5, GFLAGS), GATE(HCLK_HDCP22, "hclk_hdcp22", "hclk_hdcp", 0, RK3399_CLKGATE_CON(29), 9, GFLAGS), - GATE(PCLK_HDCP_NOC, "pclk_hdcp_noc", "pclk_hdcp", CLK_IGNORE_UNUSED, + GATE(PCLK_HDCP_NOC, "pclk_hdcp_noc", "pclk_hdcp", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(29), 3, GFLAGS), GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "pclk_hdcp", 0, RK3399_CLKGATE_CON(29), 6, GFLAGS), @@ -1129,7 +1216,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { COMPOSITE(PCLK_EDP, "pclk_edp", mux_pll_src_cpll_gpll_p, 0, RK3399_CLKSEL_CON(44), 15, 1, MFLAGS, 8, 6, DFLAGS, RK3399_CLKGATE_CON(11), 11, GFLAGS), - GATE(PCLK_EDP_NOC, "pclk_edp_noc", "pclk_edp", CLK_IGNORE_UNUSED, + GATE(PCLK_EDP_NOC, "pclk_edp_noc", "pclk_edp", CLK_IS_CRITICAL, 
RK3399_CLKGATE_CON(32), 12, GFLAGS), GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "pclk_edp", 0, RK3399_CLKGATE_CON(32), 13, GFLAGS), @@ -1143,7 +1230,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(11), 7, GFLAGS), /* vop0 */ - COMPOSITE(ACLK_VOP0_PRE, "aclk_vop0_pre", mux_pll_src_vpll_cpll_gpll_npll_p, 0, + COMPOSITE(ACLK_VOP0_PRE, "aclk_vop0_pre", mux_pll_src_dmyvpll_cpll_gpll_npll_p, 0, RK3399_CLKSEL_CON(47), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3399_CLKGATE_CON(10), 8, GFLAGS), COMPOSITE_NOMUX(0, "hclk_vop0_pre", "aclk_vop0_pre", 0, @@ -1152,28 +1239,35 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { GATE(ACLK_VOP0, "aclk_vop0", "aclk_vop0_pre", 0, RK3399_CLKGATE_CON(28), 3, GFLAGS), - GATE(ACLK_VOP0_NOC, "aclk_vop0_noc", "aclk_vop0_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_VOP0_NOC, "aclk_vop0_noc", "aclk_vop0_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(28), 1, GFLAGS), GATE(HCLK_VOP0, "hclk_vop0", "hclk_vop0_pre", 0, RK3399_CLKGATE_CON(28), 2, GFLAGS), - GATE(HCLK_VOP0_NOC, "hclk_vop0_noc", "hclk_vop0_pre", CLK_IGNORE_UNUSED, + GATE(HCLK_VOP0_NOC, "hclk_vop0_noc", "hclk_vop0_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(28), 0, GFLAGS), - COMPOSITE(DCLK_VOP0_DIV, "dclk_vop0_div", mux_pll_src_vpll_cpll_gpll_p, 0, +#ifdef RK3399_TWO_PLL_FOR_VOP + COMPOSITE(DCLK_VOP0_DIV, "dclk_vop0_div", mux_pll_src_vpll_cpll_gpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + RK3399_CLKSEL_CON(49), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3399_CLKGATE_CON(10), 12, GFLAGS), +#else + COMPOSITE(DCLK_VOP0_DIV, "dclk_vop0_div", mux_pll_src_vpll_cpll_gpll_p, CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(49), 8, 2, MFLAGS, 0, 8, DFLAGS, RK3399_CLKGATE_CON(10), 12, GFLAGS), +#endif - COMPOSITE_FRACMUX_NOGATE(DCLK_VOP0_FRAC, "dclk_vop0_frac", "dclk_vop0_div", 0, + /* The VOP0 is main screen, it is able to re-set parent rate. */ + COMPOSITE_FRACMUX_NOGATE(0, "dclk_vop0_frac", "dclk_vop0_div", CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(106), 0, - &rk3399_dclk_vop0_fracmux), + &rk3399_dclk_vop0_fracmux, RK3399_VOP_FRAC_MAX_PRATE), - COMPOSITE(SCLK_VOP0_PWM, "clk_vop0_pwm", mux_pll_src_vpll_cpll_gpll_24m_p, 0, + COMPOSITE(SCLK_VOP0_PWM, "clk_vop0_pwm", mux_pll_src_dmyvpll_cpll_gpll_gpll_p, 0, RK3399_CLKSEL_CON(51), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3399_CLKGATE_CON(10), 14, GFLAGS), /* vop1 */ - COMPOSITE(ACLK_VOP1_PRE, "aclk_vop1_pre", mux_pll_src_vpll_cpll_gpll_npll_p, 0, + COMPOSITE(ACLK_VOP1_PRE, "aclk_vop1_pre", mux_pll_src_dmyvpll_cpll_gpll_npll_p, 0, RK3399_CLKSEL_CON(48), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3399_CLKGATE_CON(10), 10, GFLAGS), COMPOSITE_NOMUX(0, "hclk_vop1_pre", "aclk_vop1_pre", 0, @@ -1182,23 +1276,30 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { GATE(ACLK_VOP1, "aclk_vop1", "aclk_vop1_pre", 0, RK3399_CLKGATE_CON(28), 7, GFLAGS), - GATE(ACLK_VOP1_NOC, "aclk_vop1_noc", "aclk_vop1_pre", CLK_IGNORE_UNUSED, + GATE(ACLK_VOP1_NOC, "aclk_vop1_noc", "aclk_vop1_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(28), 5, GFLAGS), GATE(HCLK_VOP1, "hclk_vop1", "hclk_vop1_pre", 0, RK3399_CLKGATE_CON(28), 6, GFLAGS), - GATE(HCLK_VOP1_NOC, "hclk_vop1_noc", "hclk_vop1_pre", CLK_IGNORE_UNUSED, + GATE(HCLK_VOP1_NOC, "hclk_vop1_noc", "hclk_vop1_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(28), 4, GFLAGS), - COMPOSITE(DCLK_VOP1_DIV, "dclk_vop1_div", mux_pll_src_vpll_cpll_gpll_p, 0, + /* The VOP1 is sub screen, it is note able to re-set parent rate. 
*/ +#ifdef RK3399_TWO_PLL_FOR_VOP + COMPOSITE(DCLK_VOP1_DIV, "dclk_vop1_div", mux_pll_src_vpll_cpll_gpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, RK3399_CLKSEL_CON(50), 8, 2, MFLAGS, 0, 8, DFLAGS, RK3399_CLKGATE_CON(10), 13, GFLAGS), +#else + COMPOSITE(DCLK_VOP1_DIV, "dclk_vop1_div", mux_pll_src_dmyvpll_cpll_gpll_p, 0, + RK3399_CLKSEL_CON(50), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3399_CLKGATE_CON(10), 13, GFLAGS), +#endif COMPOSITE_FRACMUX_NOGATE(DCLK_VOP1_FRAC, "dclk_vop1_frac", "dclk_vop1_div", 0, RK3399_CLKSEL_CON(107), 0, - &rk3399_dclk_vop1_fracmux), + &rk3399_dclk_vop1_fracmux, RK3399_VOP_FRAC_MAX_PRATE), - COMPOSITE(SCLK_VOP1_PWM, "clk_vop1_pwm", mux_pll_src_vpll_cpll_gpll_24m_p, CLK_IGNORE_UNUSED, + COMPOSITE(SCLK_VOP1_PWM, "clk_vop1_pwm", mux_pll_src_dmyvpll_cpll_gpll_gpll_p, 0, RK3399_CLKSEL_CON(52), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3399_CLKGATE_CON(10), 15, GFLAGS), @@ -1210,14 +1311,12 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKSEL_CON(53), 8, 5, DFLAGS, RK3399_CLKGATE_CON(12), 9, GFLAGS), - GATE(ACLK_ISP0_NOC, "aclk_isp0_noc", "aclk_isp0", CLK_IGNORE_UNUSED, + GATE(ACLK_ISP0_NOC, "aclk_isp0_noc", "aclk_isp0", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(27), 1, GFLAGS), GATE(ACLK_ISP0_WRAPPER, "aclk_isp0_wrapper", "aclk_isp0", 0, RK3399_CLKGATE_CON(27), 5, GFLAGS), - GATE(HCLK_ISP1_WRAPPER, "hclk_isp1_wrapper", "aclk_isp0", 0, - RK3399_CLKGATE_CON(27), 7, GFLAGS), - GATE(HCLK_ISP0_NOC, "hclk_isp0_noc", "hclk_isp0", CLK_IGNORE_UNUSED, + GATE(HCLK_ISP0_NOC, "hclk_isp0_noc", "hclk_isp0", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(27), 0, GFLAGS), GATE(HCLK_ISP0_WRAPPER, "hclk_isp0_wrapper", "hclk_isp0", 0, RK3399_CLKGATE_CON(27), 4, GFLAGS), @@ -1233,13 +1332,15 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKSEL_CON(54), 8, 5, DFLAGS, RK3399_CLKGATE_CON(12), 11, GFLAGS), - GATE(ACLK_ISP1_NOC, "aclk_isp1_noc", "aclk_isp1", CLK_IGNORE_UNUSED, + GATE(ACLK_ISP1_NOC, "aclk_isp1_noc", "aclk_isp1", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(27), 3, GFLAGS), + GATE(ACLK_ISP1_WRAPPER, "aclk_isp1_wrapper", "aclk_isp1", 0, + RK3399_CLKGATE_CON(27), 8, GFLAGS), - GATE(HCLK_ISP1_NOC, "hclk_isp1_noc", "hclk_isp1", CLK_IGNORE_UNUSED, + GATE(HCLK_ISP1_NOC, "hclk_isp1_noc", "hclk_isp1", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(27), 2, GFLAGS), - GATE(ACLK_ISP1_WRAPPER, "aclk_isp1_wrapper", "hclk_isp1", 0, - RK3399_CLKGATE_CON(27), 8, GFLAGS), + GATE(HCLK_ISP1_WRAPPER, "hclk_isp1_wrapper", "hclk_isp1", 0, + RK3399_CLKGATE_CON(27), 7, GFLAGS), COMPOSITE(SCLK_ISP1, "clk_isp1", mux_pll_src_cpll_gpll_npll_p, 0, RK3399_CLKSEL_CON(55), 14, 2, MFLAGS, 8, 5, DFLAGS, @@ -1257,7 +1358,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(27), 6, GFLAGS), /* cif */ - COMPOSITE_NODIV(0, "clk_cifout_src", mux_pll_src_cpll_gpll_npll_p, 0, + COMPOSITE_NODIV(SCLK_CIF_OUT_SRC, "clk_cifout_src", mux_pll_src_cpll_gpll_npll_p, 0, RK3399_CLKSEL_CON(56), 6, 2, MFLAGS, RK3399_CLKGATE_CON(10), 7, GFLAGS), @@ -1265,12 +1366,12 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKSEL_CON(56), 5, 1, MFLAGS, 0, 5, DFLAGS), /* gic */ - COMPOSITE(ACLK_GIC_PRE, "aclk_gic_pre", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED, + COMPOSITE(ACLK_GIC_PRE, "aclk_gic_pre", mux_pll_src_cpll_gpll_p, CLK_IS_CRITICAL, RK3399_CLKSEL_CON(56), 15, 1, MFLAGS, 8, 5, DFLAGS, RK3399_CLKGATE_CON(12), 12, GFLAGS), - GATE(ACLK_GIC, "aclk_gic", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 0, GFLAGS), - 
GATE(ACLK_GIC_NOC, "aclk_gic_noc", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 1, GFLAGS), + GATE(ACLK_GIC, "aclk_gic", "aclk_gic_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(33), 0, GFLAGS), + GATE(ACLK_GIC_NOC, "aclk_gic_noc", "aclk_gic_pre", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(33), 1, GFLAGS), GATE(ACLK_GIC_ADB400_CORE_L_2_GIC, "aclk_gic_adb400_core_l_2_gic", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 2, GFLAGS), GATE(ACLK_GIC_ADB400_CORE_B_2_GIC, "aclk_gic_adb400_core_b_2_gic", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 3, GFLAGS), GATE(ACLK_GIC_ADB400_GIC_2_CORE_L, "aclk_gic_adb400_gic_2_core_l", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 4, GFLAGS), @@ -1301,19 +1402,19 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { SGRF_GATE(PCLK_WDT, "pclk_wdt", "pclk_alive"), GATE(SCLK_MIPIDPHY_REF, "clk_mipidphy_ref", "xin24m", 0, RK3399_CLKGATE_CON(11), 14, GFLAGS), - GATE(SCLK_DPHY_PLL, "clk_dphy_pll", "clk_mipidphy_ref", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 0, GFLAGS), + GATE(SCLK_DPHY_PLL, "clk_dphy_pll", "clk_mipidphy_ref", 0, RK3399_CLKGATE_CON(21), 0, GFLAGS), GATE(SCLK_MIPIDPHY_CFG, "clk_mipidphy_cfg", "xin24m", 0, RK3399_CLKGATE_CON(11), 15, GFLAGS), - GATE(SCLK_DPHY_TX0_CFG, "clk_dphy_tx0_cfg", "clk_mipidphy_cfg", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 1, GFLAGS), - GATE(SCLK_DPHY_TX1RX1_CFG, "clk_dphy_tx1rx1_cfg", "clk_mipidphy_cfg", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 2, GFLAGS), - GATE(SCLK_DPHY_RX0_CFG, "clk_dphy_rx0_cfg", "clk_mipidphy_cfg", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 3, GFLAGS), + GATE(SCLK_DPHY_TX0_CFG, "clk_dphy_tx0_cfg", "clk_mipidphy_cfg", 0, RK3399_CLKGATE_CON(21), 1, GFLAGS), + GATE(SCLK_DPHY_TX1RX1_CFG, "clk_dphy_tx1rx1_cfg", "clk_mipidphy_cfg", 0, RK3399_CLKGATE_CON(21), 2, GFLAGS), + GATE(SCLK_DPHY_RX0_CFG, "clk_dphy_rx0_cfg", "clk_mipidphy_cfg", 0, RK3399_CLKGATE_CON(21), 3, GFLAGS), /* testout */ MUX(0, "clk_test_pre", mux_pll_src_cpll_gpll_p, CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(58), 7, 1, MFLAGS), COMPOSITE_FRAC(0, "clk_test_frac", "clk_test_pre", 0, RK3399_CLKSEL_CON(105), 0, - RK3399_CLKGATE_CON(13), 9, GFLAGS), + RK3399_CLKGATE_CON(13), 9, GFLAGS, 0), DIV(0, "clk_test_24m", "xin24m", 0, RK3399_CLKSEL_CON(57), 6, 10, DFLAGS), @@ -1385,13 +1486,13 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(13), 11, GFLAGS), /* ddrc */ - GATE(0, "clk_ddrc_lpll_src", "lpll", 0, RK3399_CLKGATE_CON(3), + GATE(0, "clk_ddrc_lpll_src", "lpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(3), 0, GFLAGS), - GATE(0, "clk_ddrc_bpll_src", "bpll", 0, RK3399_CLKGATE_CON(3), + GATE(0, "clk_ddrc_bpll_src", "bpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(3), 1, GFLAGS), - GATE(0, "clk_ddrc_dpll_src", "dpll", 0, RK3399_CLKGATE_CON(3), + GATE(0, "clk_ddrc_dpll_src", "dpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(3), 2, GFLAGS), - GATE(0, "clk_ddrc_gpll_src", "gpll", 0, RK3399_CLKGATE_CON(3), + GATE(0, "clk_ddrc_gpll_src", "gpll", CLK_IS_CRITICAL, RK3399_CLKGATE_CON(3), 3, GFLAGS), COMPOSITE_DDRCLK(SCLK_DDRC, "sclk_ddrc", mux_ddrclk_p, 0, RK3399_CLKSEL_CON(6), 4, 2, 0, 0, ROCKCHIP_DDRCLK_SIP), @@ -1402,10 +1503,10 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = { * PMU CRU Clock-Architecture */ - GATE(0, "fclk_cm0s_pmu_ppll_src", "ppll", 0, + GATE(0, "fclk_cm0s_pmu_ppll_src", "ppll", CLK_IS_CRITICAL, RK3399_PMU_CLKGATE_CON(0), 1, GFLAGS), - COMPOSITE_NOGATE(FCLK_CM0S_SRC_PMU, "fclk_cm0s_src_pmu", mux_fclk_cm0s_pmu_ppll_p, 
0, + COMPOSITE_NOGATE(FCLK_CM0S_SRC_PMU, "fclk_cm0s_src_pmu", mux_fclk_cm0s_pmu_ppll_p, CLK_IS_CRITICAL, RK3399_PMU_CLKSEL_CON(0), 15, 1, MFLAGS, 8, 5, DFLAGS), COMPOSITE(SCLK_SPI3_PMU, "clk_spi3_pmu", mux_24m_ppll_p, 0, @@ -1416,9 +1517,9 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = { RK3399_PMU_CLKSEL_CON(1), 13, 1, MFLAGS, 8, 5, DFLAGS, RK3399_PMU_CLKGATE_CON(0), 8, GFLAGS), - COMPOSITE_FRACMUX_NOGATE(0, "clk_wifi_frac", "clk_wifi_div", 0, + COMPOSITE_FRACMUX_NOGATE(0, "clk_wifi_frac", "clk_wifi_div", CLK_SET_RATE_PARENT, RK3399_PMU_CLKSEL_CON(7), 0, - &rk3399_pmuclk_wifi_fracmux), + &rk3399_pmuclk_wifi_fracmux, RK3399_WIFI_FRAC_MAX_PRATE), MUX(0, "clk_timer_src_pmu", mux_pll_p, CLK_IGNORE_UNUSED, RK3399_PMU_CLKSEL_CON(1), 15, 1, MFLAGS), @@ -1440,23 +1541,26 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = { MUX(0, "clk_testout_2io", mux_clk_testout2_2io_p, CLK_IGNORE_UNUSED, RK3399_PMU_CLKSEL_CON(4), 15, 1, MFLAGS), - COMPOSITE(0, "clk_uart4_div", mux_24m_ppll_p, 0, - RK3399_PMU_CLKSEL_CON(5), 10, 1, MFLAGS, 0, 7, DFLAGS, + MUX(SCLK_UART4_SRC, "clk_uart4_src", mux_24m_ppll_p, CLK_SET_RATE_NO_REPARENT, + RK3399_PMU_CLKSEL_CON(5), 10, 1, MFLAGS), + + COMPOSITE_NOMUX(0, "clk_uart4_div", "clk_uart4_src", CLK_SET_RATE_PARENT, + RK3399_PMU_CLKSEL_CON(5), 0, 7, DFLAGS, RK3399_PMU_CLKGATE_CON(0), 5, GFLAGS), - COMPOSITE_FRACMUX(0, "clk_uart4_frac", "clk_uart4_div", 0, + COMPOSITE_FRACMUX(0, "clk_uart4_frac", "clk_uart4_div", CLK_SET_RATE_PARENT, RK3399_PMU_CLKSEL_CON(6), 0, RK3399_PMU_CLKGATE_CON(0), 6, GFLAGS, - &rk3399_uart4_pmu_fracmux), + &rk3399_uart4_pmu_fracmux, RK3399_UART_FRAC_MAX_PRATE), - DIV(PCLK_SRC_PMU, "pclk_pmu_src", "ppll", CLK_IGNORE_UNUSED, + DIV(PCLK_SRC_PMU, "pclk_pmu_src", "ppll", CLK_IS_CRITICAL, RK3399_PMU_CLKSEL_CON(0), 0, 5, DFLAGS), /* pmu clock gates */ GATE(SCLK_TIMER12_PMU, "clk_timer0_pmu", "clk_timer_src_pmu", 0, RK3399_PMU_CLKGATE_CON(0), 3, GFLAGS), GATE(SCLK_TIMER13_PMU, "clk_timer1_pmu", "clk_timer_src_pmu", 0, RK3399_PMU_CLKGATE_CON(0), 4, GFLAGS), - GATE(SCLK_PVTM_PMU, "clk_pvtm_pmu", "xin24m", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(0), 7, GFLAGS), + GATE(SCLK_PVTM_PMU, "clk_pvtm_pmu", "xin24m", 0, RK3399_PMU_CLKGATE_CON(0), 7, GFLAGS), GATE(PCLK_PMU, "pclk_pmu", "pclk_pmu_src", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(1), 0, GFLAGS), GATE(PCLK_PMUGRF_PMU, "pclk_pmugrf_pmu", "pclk_pmu_src", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(1), 1, GFLAGS), @@ -1464,69 +1568,60 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = { GATE(PCLK_GPIO0_PMU, "pclk_gpio0_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 3, GFLAGS), GATE(PCLK_GPIO1_PMU, "pclk_gpio1_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 4, GFLAGS), GATE(PCLK_SGRF_PMU, "pclk_sgrf_pmu", "pclk_pmu_src", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(1), 5, GFLAGS), - GATE(PCLK_NOC_PMU, "pclk_noc_pmu", "pclk_pmu_src", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(1), 6, GFLAGS), + GATE(PCLK_NOC_PMU, "pclk_noc_pmu", "pclk_pmu_src", CLK_IS_CRITICAL, RK3399_PMU_CLKGATE_CON(1), 6, GFLAGS), GATE(PCLK_I2C0_PMU, "pclk_i2c0_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 7, GFLAGS), GATE(PCLK_I2C4_PMU, "pclk_i2c4_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 8, GFLAGS), GATE(PCLK_I2C8_PMU, "pclk_i2c8_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 9, GFLAGS), - GATE(PCLK_RKPWM_PMU, "pclk_rkpwm_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 10, GFLAGS), + GATE(PCLK_RKPWM_PMU, "pclk_rkpwm_pmu", "pclk_pmu_src", 
CLK_IS_CRITICAL, RK3399_PMU_CLKGATE_CON(1), 10, GFLAGS), GATE(PCLK_SPI3_PMU, "pclk_spi3_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 11, GFLAGS), GATE(PCLK_TIMER_PMU, "pclk_timer_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 12, GFLAGS), GATE(PCLK_MAILBOX_PMU, "pclk_mailbox_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 13, GFLAGS), GATE(PCLK_UART4_PMU, "pclk_uart4_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 14, GFLAGS), GATE(PCLK_WDT_M0_PMU, "pclk_wdt_m0_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 15, GFLAGS), - GATE(FCLK_CM0S_PMU, "fclk_cm0s_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 0, GFLAGS), - GATE(SCLK_CM0S_PMU, "sclk_cm0s_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 1, GFLAGS), - GATE(HCLK_CM0S_PMU, "hclk_cm0s_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 2, GFLAGS), - GATE(DCLK_CM0S_PMU, "dclk_cm0s_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 3, GFLAGS), - GATE(HCLK_NOC_PMU, "hclk_noc_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 5, GFLAGS), + GATE(FCLK_CM0S_PMU, "fclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 0, GFLAGS), + GATE(SCLK_CM0S_PMU, "sclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 1, GFLAGS), + GATE(HCLK_CM0S_PMU, "hclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 2, GFLAGS), + GATE(DCLK_CM0S_PMU, "dclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 3, GFLAGS), + GATE(HCLK_NOC_PMU, "hclk_noc_pmu", "fclk_cm0s_src_pmu", CLK_IS_CRITICAL, RK3399_PMU_CLKGATE_CON(2), 5, GFLAGS), }; -static const char *const rk3399_cru_critical_clocks[] __initconst = { - "aclk_cci_pre", - "aclk_gic", - "aclk_gic_noc", - "aclk_hdcp_noc", - "hclk_hdcp_noc", - "pclk_hdcp_noc", - "pclk_perilp0", - "pclk_perilp0", - "hclk_perilp0", - "hclk_perilp0_noc", - "pclk_perilp1", - "pclk_perilp1_noc", - "pclk_perihp", - "pclk_perihp_noc", - "hclk_perihp", - "aclk_perihp", - "aclk_perihp_noc", - "aclk_perilp0", - "aclk_perilp0_noc", - "hclk_perilp1", - "hclk_perilp1_noc", - "aclk_dmac0_perilp", - "aclk_emmc_noc", - "gpll_hclk_perilp1_src", - "gpll_aclk_perilp0_src", - "gpll_aclk_perihp_src", - "aclk_vio_noc", +static void __iomem *rk3399_cru_base; +static void __iomem *rk3399_pmucru_base; - /* ddrc */ - "sclk_ddrc" -}; +void rk3399_dump_cru(void) +{ + if (rk3399_cru_base) { + pr_warn("CRU:\n"); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, + 32, 4, rk3399_cru_base, + 0x594, false); + } + if (rk3399_pmucru_base) { + pr_warn("PMU CRU:\n"); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, + 32, 4, rk3399_pmucru_base, + 0x134, false); + } +} +EXPORT_SYMBOL_GPL(rk3399_dump_cru); + +static int rk3399_clk_panic(struct notifier_block *this, + unsigned long ev, void *ptr) +{ + rk3399_dump_cru(); + return NOTIFY_DONE; +} -static const char *const rk3399_pmucru_critical_clocks[] __initconst = { - "ppll", - "pclk_pmu_src", - "fclk_cm0s_src_pmu", - "clk_timer_src_pmu", - "pclk_rkpwm_pmu", +static struct notifier_block rk3399_clk_panic_block = { + .notifier_call = rk3399_clk_panic, }; static void __init rk3399_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; void __iomem *reg_base; + struct clk **clks; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -1534,12 +1629,15 @@ static void __init rk3399_clk_init(struct device_node *np) return; } + rk3399_cru_base = reg_base; + ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); if (IS_ERR(ctx)) { 
pr_err("%s: rockchip clk init failed\n", __func__); iounmap(reg_base); return; } + clks = ctx->clk_data.clks; rockchip_clk_register_plls(ctx, rk3399_pll_clks, ARRAY_SIZE(rk3399_pll_clks), -1); @@ -1547,16 +1645,13 @@ static void __init rk3399_clk_init(struct device_node *np) rockchip_clk_register_branches(ctx, rk3399_clk_branches, ARRAY_SIZE(rk3399_clk_branches)); - rockchip_clk_protect_critical(rk3399_cru_critical_clocks, - ARRAY_SIZE(rk3399_cru_critical_clocks)); - rockchip_clk_register_armclk(ctx, ARMCLKL, "armclkl", - mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p), + 4, clks[PLL_APLLL], clks[PLL_GPLL], &rk3399_cpuclkl_data, rk3399_cpuclkl_rates, ARRAY_SIZE(rk3399_cpuclkl_rates)); rockchip_clk_register_armclk(ctx, ARMCLKB, "armclkb", - mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p), + 4, clks[PLL_APLLB], clks[PLL_GPLL], &rk3399_cpuclkb_data, rk3399_cpuclkb_rates, ARRAY_SIZE(rk3399_cpuclkb_rates)); @@ -1580,6 +1675,8 @@ static void __init rk3399_pmu_clk_init(struct device_node *np) return; } + rk3399_pmucru_base = reg_base; + ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS); if (IS_ERR(ctx)) { pr_err("%s: rockchip pmu clk init failed\n", __func__); @@ -1593,13 +1690,13 @@ static void __init rk3399_pmu_clk_init(struct device_node *np) rockchip_clk_register_branches(ctx, rk3399_clk_pmu_branches, ARRAY_SIZE(rk3399_clk_pmu_branches)); - rockchip_clk_protect_critical(rk3399_pmucru_critical_clocks, - ARRAY_SIZE(rk3399_pmucru_critical_clocks)); - rockchip_register_softrst(np, 2, reg_base + RK3399_PMU_SOFTRST_CON(0), ROCKCHIP_SOFTRST_HIWORD_MASK); rockchip_clk_of_add_provider(np, ctx); + + atomic_notifier_chain_register(&panic_notifier_list, + &rk3399_clk_panic_block); } CLK_OF_DECLARE(rk3399_cru_pmu, "rockchip,rk3399-pmucru", rk3399_pmu_clk_init); diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index b443169dd408..6c8e47067032 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -38,6 +38,7 @@ static struct clk *rockchip_clk_register_branch(const char *name, const char *const *parent_names, u8 num_parents, void __iomem *base, int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags, + u32 *mux_table, int div_offset, u8 div_shift, u8 div_width, u8 div_flags, struct clk_div_table *div_table, int gate_offset, u8 gate_shift, u8 gate_flags, unsigned long flags, @@ -60,6 +61,7 @@ static struct clk *rockchip_clk_register_branch(const char *name, mux->shift = mux_shift; mux->mask = BIT(mux_width) - 1; mux->flags = mux_flags; + mux->table = mux_table; mux->lock = lock; mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? 
&clk_mux_ro_ops : &clk_mux_ops; @@ -182,12 +184,43 @@ static void rockchip_fractional_approximation(struct clk_hw *hw, unsigned long p_rate, p_parent_rate; struct clk_hw *p_parent; unsigned long scale; + u32 div; p_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); - if ((rate * 20 > p_rate) && (p_rate % rate != 0)) { + if (((rate * 20 > p_rate) && (p_rate % rate != 0)) || + (fd->max_prate && fd->max_prate < p_rate)) { p_parent = clk_hw_get_parent(clk_hw_get_parent(hw)); - p_parent_rate = clk_hw_get_rate(p_parent); - *parent_rate = p_parent_rate; + if (!p_parent) { + *parent_rate = p_rate; + } else { + p_parent_rate = clk_hw_get_rate(p_parent); + *parent_rate = p_parent_rate; + if (fd->max_prate && p_parent_rate > fd->max_prate) { + div = DIV_ROUND_UP(p_parent_rate, + fd->max_prate); + *parent_rate = p_parent_rate / div; + } + } + + if (*parent_rate < rate * 20) { + /* + * Fractional frequency divider to do + * integer frequency divider does not + * need 20 times the limit. + */ + if (!(*parent_rate % rate)) { + *m = 1; + *n = *parent_rate / rate; + return; + } else if (!(fd->flags & CLK_FRAC_DIVIDER_NO_LIMIT)) { + pr_warn("%s p_rate(%ld) is low than rate(%ld)*20, use integer or half-div\n", + clk_hw_get_name(hw), + *parent_rate, rate); + *m = 0; + *n = 1; + return; + } + } } /* @@ -210,7 +243,7 @@ static struct clk *rockchip_clk_register_frac_branch( void __iomem *base, int muxdiv_offset, u8 div_flags, int gate_offset, u8 gate_shift, u8 gate_flags, unsigned long flags, struct rockchip_clk_branch *child, - spinlock_t *lock) + unsigned long max_prate, spinlock_t *lock) { struct clk_hw *hw; struct rockchip_clk_frac *frac; @@ -251,6 +284,7 @@ static struct clk *rockchip_clk_register_frac_branch( div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift; div->lock = lock; div->approximation = rockchip_fractional_approximation; + div->max_prate = max_prate; div_ops = &clk_fractional_divider_ops; hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, @@ -278,6 +312,8 @@ static struct clk *rockchip_clk_register_frac_branch( frac_mux->shift = child->mux_shift; frac_mux->mask = BIT(child->mux_width) - 1; frac_mux->flags = child->mux_flags; + if (child->mux_table) + frac_mux->table = child->mux_table; frac_mux->lock = lock; frac_mux->hw.init = &init; @@ -360,6 +396,61 @@ static struct clk *rockchip_clk_register_factor_branch(const char *name, return hw->clk; } +static struct clk *rockchip_clk_register_composite_brother_branch( + struct rockchip_clk_provider *ctx, const char *name, + const char *const *parent_names, u8 num_parents, + void __iomem *base, int muxdiv_offset, u8 mux_shift, + u8 mux_width, u8 mux_flags, u32 *mux_table, + int div_offset, u8 div_shift, u8 div_width, u8 div_flags, + struct clk_div_table *div_table, int gate_offset, + u8 gate_shift, u8 gate_flags, unsigned long flags, + struct rockchip_clk_branch *brother, spinlock_t *lock) +{ + struct clk *clk, *brother_clk; + struct clk_composite *composite, *brother_composite; + struct clk_hw *hw, *brother_hw; + + if (brother && brother->branch_type != branch_half_divider) { + pr_err("%s: composite brother for %s can only be a halfdiv\n", + __func__, name); + return ERR_PTR(-EINVAL); + } + + clk = rockchip_clk_register_branch(name, parent_names, num_parents, + base, muxdiv_offset, mux_shift, + mux_width, mux_flags, mux_table, + div_offset, div_shift, div_width, + div_flags, div_table, + gate_offset, gate_shift, gate_flags, + flags, lock); + if (IS_ERR(clk)) + return clk; + + brother_clk = 
rockchip_clk_register_halfdiv(brother->name, + brother->parent_names, brother->num_parents, + base, brother->muxdiv_offset, + brother->mux_shift, brother->mux_width, + brother->mux_flags, brother->div_offset, + brother->div_shift, brother->div_width, + brother->div_flags, brother->gate_offset, + brother->gate_shift, brother->gate_flags, + flags, lock); + if (IS_ERR(brother_clk)) + return brother_clk; + rockchip_clk_add_lookup(ctx, brother_clk, brother->id); + + hw = __clk_get_hw(clk); + brother_hw = __clk_get_hw(brother_clk); + if (hw && brother_hw) { + composite = to_clk_composite(hw); + brother_composite = to_clk_composite(brother_hw); + composite->brother_hw = brother_hw; + brother_composite->brother_hw = hw; + } + + return clk; +} + struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np, void __iomem *base, unsigned long nr_clks) @@ -387,6 +478,8 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np, ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node, "rockchip,grf"); + ctx->pmugrf = syscon_regmap_lookup_by_phandle(ctx->cru_node, + "rockchip,pmugrf"); return ctx; @@ -452,11 +545,22 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, /* catch simple muxes */ switch (list->branch_type) { case branch_mux: - clk = clk_register_mux(NULL, list->name, - list->parent_names, list->num_parents, - flags, ctx->reg_base + list->muxdiv_offset, - list->mux_shift, list->mux_width, - list->mux_flags, &ctx->lock); + if (list->mux_table) + clk = clk_register_mux_table(NULL, list->name, + list->parent_names, list->num_parents, + flags, + ctx->reg_base + list->muxdiv_offset, + list->mux_shift, + BIT(list->mux_width) - 1, + list->mux_flags, list->mux_table, + &ctx->lock); + else + clk = clk_register_mux(NULL, list->name, + list->parent_names, list->num_parents, + flags, + ctx->reg_base + list->muxdiv_offset, + list->mux_shift, list->mux_width, + list->mux_flags, &ctx->lock); break; case branch_muxgrf: clk = rockchip_clk_register_muxgrf(list->name, @@ -465,6 +569,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, list->mux_shift, list->mux_width, list->mux_flags); break; + case branch_muxpmugrf: + clk = rockchip_clk_register_muxgrf(list->name, + list->parent_names, list->num_parents, + flags, ctx->pmugrf, list->muxdiv_offset, + list->mux_shift, list->mux_width, + list->mux_flags); + break; case branch_divider: if (list->div_table) clk = clk_register_divider_table(NULL, @@ -488,17 +599,18 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, list->div_flags, list->gate_offset, list->gate_shift, list->gate_flags, flags, list->child, - &ctx->lock); + list->max_prate, &ctx->lock); break; case branch_half_divider: clk = rockchip_clk_register_halfdiv(list->name, list->parent_names, list->num_parents, ctx->reg_base, list->muxdiv_offset, list->mux_shift, list->mux_width, - list->mux_flags, list->div_shift, - list->div_width, list->div_flags, - list->gate_offset, list->gate_shift, - list->gate_flags, flags, &ctx->lock); + list->mux_flags, list->div_offset, + list->div_shift, list->div_width, + list->div_flags, list->gate_offset, + list->gate_shift, list->gate_flags, + flags, &ctx->lock); break; case branch_gate: flags |= CLK_SET_RATE_PARENT; @@ -514,11 +626,25 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, ctx->reg_base, list->muxdiv_offset, list->mux_shift, list->mux_width, list->mux_flags, - list->div_offset, list->div_shift, list->div_width, + list->mux_table, list->div_offset, + 
list->div_shift, list->div_width, list->div_flags, list->div_table, list->gate_offset, list->gate_shift, list->gate_flags, flags, &ctx->lock); break; + case branch_composite_brother: + clk = rockchip_clk_register_composite_brother_branch( + ctx, list->name, list->parent_names, + list->num_parents, ctx->reg_base, + list->muxdiv_offset, list->mux_shift, + list->mux_width, list->mux_flags, + list->mux_table, list->div_offset, + list->div_shift, list->div_width, + list->div_flags, list->div_table, + list->gate_offset, list->gate_shift, + list->gate_flags, flags, list->child, + &ctx->lock); + break; case branch_mmc: clk = rockchip_clk_register_mmc( list->name, @@ -549,7 +675,17 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, list->muxdiv_offset, list->mux_shift, list->mux_width, list->div_shift, list->div_width, list->div_flags, - ctx->reg_base, &ctx->lock); + ctx->reg_base); + break; + case branch_dclk_divider: + clk = rockchip_clk_register_dclk_branch(list->name, + list->parent_names, list->num_parents, + ctx->reg_base, list->muxdiv_offset, list->mux_shift, + list->mux_width, list->mux_flags, + list->div_offset, list->div_shift, list->div_width, + list->div_flags, list->div_table, + list->gate_offset, list->gate_shift, + list->gate_flags, flags, list->max_prate, &ctx->lock); break; } @@ -573,15 +709,17 @@ EXPORT_SYMBOL_GPL(rockchip_clk_register_branches); void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx, unsigned int lookup_id, - const char *name, const char *const *parent_names, + const char *name, u8 num_parents, + struct clk *parent, struct clk *alt_parent, const struct rockchip_cpuclk_reg_data *reg_data, const struct rockchip_cpuclk_rate_table *rates, int nrates) { struct clk *clk; - clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents, + clk = rockchip_clk_register_cpuclk(name, num_parents, + parent, alt_parent, reg_data, rates, nrates, ctx->reg_base, &ctx->lock); if (IS_ERR(clk)) { @@ -594,20 +732,20 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx, } EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk); -void rockchip_clk_protect_critical(const char *const clocks[], - int nclocks) -{ - int i; - - /* Protect the clocks that needs to stay on */ - for (i = 0; i < nclocks; i++) { - struct clk *clk = __clk_lookup(clocks[i]); +void (*rk_dump_cru)(void); +EXPORT_SYMBOL(rk_dump_cru); - if (clk) - clk_prepare_enable(clk); - } +static int rk_clk_panic(struct notifier_block *this, + unsigned long ev, void *ptr) +{ + if (rk_dump_cru) + rk_dump_cru(); + return NOTIFY_DONE; } -EXPORT_SYMBOL_GPL(rockchip_clk_protect_critical); + +static struct notifier_block rk_clk_panic_block = { + .notifier_call = rk_clk_panic, +}; static void __iomem *rst_base; static unsigned int reg_restart; @@ -641,5 +779,7 @@ rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx, if (ret) pr_err("%s: cannot register restart handler, %d\n", __func__, ret); + atomic_notifier_chain_register(&panic_notifier_list, + &rk_clk_panic_block); } EXPORT_SYMBOL_GPL(rockchip_register_restart_notifier); diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index 2271a84124b0..509087750eeb 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -37,12 +37,25 @@ struct clk; #define BOOST_SWITCH_THRESHOLD 0x0024 #define BOOST_FSM_STATUS 0x0028 #define BOOST_PLL_L_CON(x) ((x) * 0x4 + 0x2c) +#define BOOST_PLL_CON_MASK 0xffff +#define BOOST_CORE_DIV_MASK 0x1f +#define BOOST_CORE_DIV_SHIFT 0 +#define BOOST_BACKUP_PLL_MASK 
0x3 +#define BOOST_BACKUP_PLL_SHIFT 8 +#define BOOST_BACKUP_PLL_USAGE_MASK 0x1 +#define BOOST_BACKUP_PLL_USAGE_SHIFT 12 +#define BOOST_BACKUP_PLL_USAGE_BORROW 0 +#define BOOST_BACKUP_PLL_USAGE_TARGET 1 +#define BOOST_ENABLE_MASK 0x1 +#define BOOST_ENABLE_SHIFT 0 #define BOOST_RECOVERY_MASK 0x1 #define BOOST_RECOVERY_SHIFT 1 #define BOOST_SW_CTRL_MASK 0x1 #define BOOST_SW_CTRL_SHIFT 2 #define BOOST_LOW_FREQ_EN_MASK 0x1 #define BOOST_LOW_FREQ_EN_SHIFT 3 +#define BOOST_STATIS_ENABLE_MASK 0x1 +#define BOOST_STATIS_ENABLE_SHIFT 4 #define BOOST_BUSY_STATE BIT(8) #define PX30_PLL_CON(x) ((x) * 0x4) @@ -79,6 +92,51 @@ struct clk; #define RV1108_EMMC_CON0 0x1e8 #define RV1108_EMMC_CON1 0x1ec +#define RV1126_PMU_MODE 0x0 +#define RV1126_PMU_PLL_CON(x) ((x) * 0x4 + 0x10) +#define RV1126_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x100) +#define RV1126_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x180) +#define RV1126_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x200) +#define RV1126_PLL_CON(x) ((x) * 0x4) +#define RV1126_MODE_CON 0x90 +#define RV1126_CLKSEL_CON(x) ((x) * 0x4 + 0x100) +#define RV1126_CLKGATE_CON(x) ((x) * 0x4 + 0x280) +#define RV1126_SOFTRST_CON(x) ((x) * 0x4 + 0x300) +#define RV1126_GLB_SRST_FST 0x408 +#define RV1126_GLB_SRST_SND 0x40c +#define RV1126_SDMMC_CON0 0x440 +#define RV1126_SDMMC_CON1 0x444 +#define RV1126_SDIO_CON0 0x448 +#define RV1126_SDIO_CON1 0x44c +#define RV1126_EMMC_CON0 0x450 +#define RV1126_EMMC_CON1 0x454 + +/* + * register positions shared by RK1808 RK2928, RK3036, + * RK3066, RK3188 and RK3228 + */ + +#define RK1808_PLL_CON(x) ((x) * 0x4) +#define RK1808_MODE_CON 0xa0 +#define RK1808_MISC_CON 0xa4 +#define RK1808_MISC1_CON 0xa8 +#define RK1808_GLB_SRST_FST 0xb8 +#define RK1808_GLB_SRST_SND 0xbc +#define RK1808_CLKSEL_CON(x) ((x) * 0x4 + 0x100) +#define RK1808_CLKGATE_CON(x) ((x) * 0x4 + 0x230) +#define RK1808_SOFTRST_CON(x) ((x) * 0x4 + 0x300) +#define RK1808_SDMMC_CON0 0x380 +#define RK1808_SDMMC_CON1 0x384 +#define RK1808_SDIO_CON0 0x388 +#define RK1808_SDIO_CON1 0x38c +#define RK1808_EMMC_CON0 0x390 +#define RK1808_EMMC_CON1 0x394 + +#define RK1808_PMU_PLL_CON(x) ((x) * 0x4 + 0x4000) +#define RK1808_PMU_MODE_CON 0x4020 +#define RK1808_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x4040) +#define RK1808_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x4080) + #define RK2928_PLL_CON(x) ((x) * 0x4) #define RK2928_MODE_CON 0x40 #define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44) @@ -188,6 +246,34 @@ struct clk; #define RK3399_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x100) #define RK3399_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x110) +#define RK3568_PLL_CON(x) RK2928_PLL_CON(x) +#define RK3568_MODE_CON0 0xc0 +#define RK3568_MISC_CON0 0xc4 +#define RK3568_MISC_CON1 0xc8 +#define RK3568_MISC_CON2 0xcc +#define RK3568_GLB_CNT_TH 0xd0 +#define RK3568_GLB_SRST_FST 0xd4 +#define RK3568_GLB_SRST_SND 0xd8 +#define RK3568_GLB_RST_CON 0xdc +#define RK3568_GLB_RST_ST 0xe0 +#define RK3568_CLKSEL_CON(x) ((x) * 0x4 + 0x100) +#define RK3568_CLKGATE_CON(x) ((x) * 0x4 + 0x300) +#define RK3568_SOFTRST_CON(x) ((x) * 0x4 + 0x400) +#define RK3568_SDMMC0_CON0 0x580 +#define RK3568_SDMMC0_CON1 0x584 +#define RK3568_SDMMC1_CON0 0x588 +#define RK3568_SDMMC1_CON1 0x58c +#define RK3568_SDMMC2_CON0 0x590 +#define RK3568_SDMMC2_CON1 0x594 +#define RK3568_EMMC_CON0 0x598 +#define RK3568_EMMC_CON1 0x59c + +#define RK3568_PMU_PLL_CON(x) RK2928_PLL_CON(x) +#define RK3568_PMU_MODE_CON0 0x80 +#define RK3568_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x100) +#define RK3568_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x180) +#define RK3568_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x200) + enum rockchip_pll_type { 
pll_rk3036, pll_rk3066, @@ -238,22 +324,30 @@ struct rockchip_clk_provider { struct clk_onecell_data clk_data; struct device_node *cru_node; struct regmap *grf; + struct regmap *pmugrf; spinlock_t lock; }; struct rockchip_pll_rate_table { unsigned long rate; - unsigned int nr; - unsigned int nf; - unsigned int no; - unsigned int nb; - /* for RK3036/RK3399 */ - unsigned int fbdiv; - unsigned int postdiv1; - unsigned int refdiv; - unsigned int postdiv2; - unsigned int dsmpd; - unsigned int frac; + union { + struct { + /* for RK3066 */ + unsigned int nr; + unsigned int nf; + unsigned int no; + unsigned int nb; + }; + struct { + /* for RK3036/RK3399 */ + unsigned int fbdiv; + unsigned int postdiv1; + unsigned int refdiv; + unsigned int postdiv2; + unsigned int dsmpd; + unsigned int frac; + }; + }; }; /** @@ -317,12 +411,21 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, struct rockchip_pll_rate_table *rate_table, unsigned long flags, u8 clk_pll_flags); +void rockchip_boost_init(struct clk_hw *hw); + +void rockchip_boost_enable_recovery_sw_low(struct clk_hw *hw); + +void rockchip_boost_disable_recovery_sw(struct clk_hw *hw); + +void rockchip_boost_add_core_div(struct clk_hw *hw, unsigned long prate); + struct rockchip_cpuclk_clksel { int reg; u32 val; }; -#define ROCKCHIP_CPUCLK_NUM_DIVIDERS 2 +#define ROCKCHIP_CPUCLK_NUM_DIVIDERS 5 +#define ROCKCHIP_CPUCLK_MAX_CORES 4 struct rockchip_cpuclk_rate_table { unsigned long prate; struct rockchip_cpuclk_clksel divs[ROCKCHIP_CPUCLK_NUM_DIVIDERS]; @@ -330,26 +433,29 @@ struct rockchip_cpuclk_rate_table { /** * struct rockchip_cpuclk_reg_data - register offsets and masks of the cpuclock - * @core_reg: register offset of the core settings register - * @div_core_shift: core divider offset used to divide the pll value - * @div_core_mask: core divider mask - * @mux_core_alt: mux value to select alternate parent + * @core_reg[]: register offset of the cores setting register + * @div_core_shift[]: cores divider offset used to divide the pll value + * @div_core_mask[]: cores divider mask + * @num_cores: number of cpu cores * @mux_core_main: mux value to select main parent of core * @mux_core_shift: offset of the core multiplexer * @mux_core_mask: core multiplexer mask */ struct rockchip_cpuclk_reg_data { - int core_reg; - u8 div_core_shift; - u32 div_core_mask; - u8 mux_core_alt; - u8 mux_core_main; - u8 mux_core_shift; - u32 mux_core_mask; + int core_reg[ROCKCHIP_CPUCLK_MAX_CORES]; + u8 div_core_shift[ROCKCHIP_CPUCLK_MAX_CORES]; + u32 div_core_mask[ROCKCHIP_CPUCLK_MAX_CORES]; + int num_cores; + u8 mux_core_alt; + u8 mux_core_main; + u8 mux_core_shift; + u32 mux_core_mask; + const char *pll_name; }; struct clk *rockchip_clk_register_cpuclk(const char *name, - const char *const *parent_names, u8 num_parents, + u8 num_parents, + struct clk *parent, struct clk *alt_parent, const struct rockchip_cpuclk_reg_data *reg_data, const struct rockchip_cpuclk_rate_table *rates, int nrates, void __iomem *reg_base, spinlock_t *lock); @@ -361,16 +467,21 @@ struct clk *rockchip_clk_register_mmc(const char *name, /* * DDRCLK flags, including method of setting the rate * ROCKCHIP_DDRCLK_SIP: use SIP call to bl31 to change ddrclk rate. + * ROCKCHIP_DDRCLK_SCPI: use SCPI APIs to let mcu change ddrclk rate. 
*/ #define ROCKCHIP_DDRCLK_SIP BIT(0) +#define ROCKCHIP_DDRCLK_SCPI 0x02 +#define ROCKCHIP_DDRCLK_SIP_V2 0x03 + +void rockchip_set_ddrclk_params(void __iomem *params); +void rockchip_set_ddrclk_dmcfreq_wait_complete(int (*func)(void)); struct clk *rockchip_clk_register_ddrclk(const char *name, int flags, const char *const *parent_names, u8 num_parents, int mux_offset, int mux_shift, int mux_width, int div_shift, int div_width, - int ddr_flags, void __iomem *reg_base, - spinlock_t *lock); + int ddr_flags, void __iomem *reg_base); #define ROCKCHIP_INVERTER_HIWORD_MASK BIT(0) @@ -388,8 +499,10 @@ struct clk *rockchip_clk_register_muxgrf(const char *name, enum rockchip_clk_branch_type { branch_composite, + branch_composite_brother, branch_mux, branch_muxgrf, + branch_muxpmugrf, branch_divider, branch_fraction_divider, branch_gate, @@ -398,6 +511,7 @@ enum rockchip_clk_branch_type { branch_factor, branch_ddrclk, branch_half_divider, + branch_dclk_divider, }; struct rockchip_clk_branch { @@ -411,6 +525,7 @@ struct rockchip_clk_branch { u8 mux_shift; u8 mux_width; u8 mux_flags; + u32 *mux_table; int div_offset; u8 div_shift; u8 div_width; @@ -420,6 +535,7 @@ struct rockchip_clk_branch { u8 gate_shift; u8 gate_flags; struct rockchip_clk_branch *child; + unsigned long max_prate; }; #define COMPOSITE(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\ @@ -443,6 +559,50 @@ struct rockchip_clk_branch { .gate_flags = gf, \ } +#define COMPOSITE_BROTHER(_id, cname, pnames, f, mo, ms, mw, mf,\ + ds, dw, df, go, gs, gf, bro) \ + { \ + .id = _id, \ + .branch_type = branch_composite_brother, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .gate_offset = go, \ + .gate_shift = gs, \ + .gate_flags = gf, \ + .child = bro, \ + } + +#define COMPOSITE_MUXTBL(_id, cname, pnames, f, mo, ms, mw, mf, \ + mt, ds, dw, df, go, gs, gf) \ + { \ + .id = _id, \ + .branch_type = branch_composite, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .mux_table = mt, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .gate_offset = go, \ + .gate_shift = gs, \ + .gate_flags = gf, \ + } + #define COMPOSITE_DIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, \ mf, do, ds, dw, df, go, gs, gf) \ { \ @@ -539,6 +699,26 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +#define COMPOSITE_BROTHER_NOGATE(_id, cname, pnames, f, mo, ms, \ + mw, mf, ds, dw, df, bro) \ + { \ + .id = _id, \ + .branch_type = branch_composite_brother, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .gate_offset = -1, \ + .child = bro, \ + } + #define COMPOSITE_NOGATE_DIVTBL(_id, cname, pnames, f, mo, ms, \ mw, mf, ds, dw, df, dt) \ { \ @@ -559,7 +739,7 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } -#define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf)\ +#define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf, prate)\ { \ .id = _id, \ .branch_type = branch_fraction_divider, \ @@ -574,9 +754,10 @@ struct rockchip_clk_branch { .gate_offset = go, \ .gate_shift = gs, \ .gate_flags 
= gf, \ + .max_prate = prate, \ } -#define COMPOSITE_FRACMUX(_id, cname, pname, f, mo, df, go, gs, gf, ch) \ +#define COMPOSITE_FRACMUX(_id, cname, pname, f, mo, df, go, gs, gf, ch, prate) \ { \ .id = _id, \ .branch_type = branch_fraction_divider, \ @@ -592,9 +773,10 @@ struct rockchip_clk_branch { .gate_shift = gs, \ .gate_flags = gf, \ .child = ch, \ + .max_prate = prate, \ } -#define COMPOSITE_FRACMUX_NOGATE(_id, cname, pname, f, mo, df, ch) \ +#define COMPOSITE_FRACMUX_NOGATE(_id, cname, pname, f, mo, df, ch, prate) \ { \ .id = _id, \ .branch_type = branch_fraction_divider, \ @@ -608,6 +790,7 @@ struct rockchip_clk_branch { .div_flags = df, \ .gate_offset = -1, \ .child = ch, \ + .max_prate = prate, \ } #define COMPOSITE_DDRCLK(_id, cname, pnames, f, mo, ms, mw, \ @@ -643,6 +826,22 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +#define MUXTBL(_id, cname, pnames, f, o, s, w, mf, mt) \ + { \ + .id = _id, \ + .branch_type = branch_mux, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = o, \ + .mux_shift = s, \ + .mux_width = w, \ + .mux_flags = mf, \ + .gate_offset = -1, \ + .mux_table = mt, \ + } + #define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \ { \ .id = _id, \ @@ -658,6 +857,21 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +#define MUXPMUGRF(_id, cname, pnames, f, o, s, w, mf) \ + { \ + .id = _id, \ + .branch_type = branch_muxpmugrf, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = o, \ + .mux_shift = s, \ + .mux_width = w, \ + .mux_flags = mf, \ + .gate_offset = -1, \ + } + #define DIV(_id, cname, pname, f, o, s, w, df) \ { \ .id = _id, \ @@ -772,6 +986,28 @@ struct rockchip_clk_branch { .gate_flags = gf, \ } +#define COMPOSITE_HALFDIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, mf, do,\ + ds, dw, df, go, gs, gf) \ + { \ + .id = _id, \ + .branch_type = branch_half_divider, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .div_offset = do, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .gate_offset = go, \ + .gate_shift = gs, \ + .gate_flags = gf, \ + } + #define COMPOSITE_NOGATE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf, \ ds, dw, df) \ { \ @@ -824,6 +1060,28 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +#define COMPOSITE_DCLK(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\ + df, go, gs, gf, prate) \ + { \ + .id = _id, \ + .branch_type = branch_dclk_divider, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .gate_offset = go, \ + .gate_shift = gs, \ + .gate_flags = gf, \ + .max_prate = prate, \ + } + /* SGRF clocks are only accessible from secure mode, so not controllable */ #define SGRF_GATE(_id, cname, pname) \ FACTOR(_id, cname, pname, 0, 1, 1) @@ -840,13 +1098,17 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx, struct rockchip_pll_clock *pll_list, unsigned int nr_pll, int grf_lock_offset); -void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx, - unsigned int lookup_id, const char *name, - const char *const *parent_names, u8 
num_parents, - const struct rockchip_cpuclk_reg_data *reg_data, - const struct rockchip_cpuclk_rate_table *rates, - int nrates); -void rockchip_clk_protect_critical(const char *const clocks[], int nclocks); +void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx, + unsigned int lookup_id, + const char *name, + u8 num_parents, + struct clk *parent, struct clk *alt_parent, + const struct rockchip_cpuclk_reg_data *reg_data, + const struct rockchip_cpuclk_rate_table *rates, + int nrates); +int rockchip_pll_clk_rate_to_scale(struct clk *clk, unsigned long rate); +int rockchip_pll_clk_scale_to_rate(struct clk *clk, unsigned int scale); +int rockchip_pll_clk_adaptive_scaling(struct clk *clk, int sel); void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx, unsigned int reg, void (*cb)(void)); @@ -857,12 +1119,27 @@ struct clk *rockchip_clk_register_halfdiv(const char *name, u8 num_parents, void __iomem *base, int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags, - u8 div_shift, u8 div_width, - u8 div_flags, int gate_offset, - u8 gate_shift, u8 gate_flags, - unsigned long flags, + int div_offset, u8 div_shift, + u8 div_width, u8 div_flags, + int gate_offset, u8 gate_shift, + u8 gate_flags, unsigned long flags, spinlock_t *lock); +struct clk *rockchip_clk_register_dclk_branch(const char *name, + const char *const *parent_names, + u8 num_parents, + void __iomem *base, + int muxdiv_offset, u8 mux_shift, + u8 mux_width, u8 mux_flags, + int div_offset, u8 div_shift, + u8 div_width, u8 div_flags, + struct clk_div_table *div_table, + int gate_offset, + u8 gate_shift, u8 gate_flags, + unsigned long flags, + unsigned long max_prate, + spinlock_t *lock); + #ifdef CONFIG_RESET_CONTROLLER void rockchip_register_softrst(struct device_node *np, unsigned int num_regs, @@ -874,5 +1151,6 @@ static inline void rockchip_register_softrst(struct device_node *np, { } #endif +extern void (*rk_dump_cru)(void); #endif diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index a0c6e88bebe0..9d9cb5757913 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -85,7 +85,9 @@ config IXP4XX_TIMER Enables support for the Intel XScale IXP4xx SoC timer. 
config ROCKCHIP_TIMER - bool "Rockchip timer driver" if COMPILE_TEST + tristate "Rockchip timer driver" + default ARCH_ROCKCHIP + depends on ARCH_ROCKCHIP || COMPILE_TEST depends on ARM || ARM64 select TIMER_OF select CLKSRC_MMIO diff --git a/drivers/clocksource/timer-rockchip.c b/drivers/clocksource/timer-rockchip.c index 1f95d0aca08f..2f4e970d7433 100644 --- a/drivers/clocksource/timer-rockchip.c +++ b/drivers/clocksource/timer-rockchip.c @@ -8,11 +8,13 @@ #include #include #include +#include #include #include #include #include #include +#include #define TIMER_NAME "rk_timer" @@ -45,7 +47,9 @@ struct rk_clkevt { }; static struct rk_clkevt *rk_clkevt; +#ifndef MODULE static struct rk_timer *rk_clksrc; +#endif static inline struct rk_timer *rk_timer(struct clock_event_device *ce) { @@ -119,10 +123,12 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +#ifndef MODULE static u64 notrace rk_timer_sched_read(void) { return ~readl_relaxed(rk_clksrc->base + TIMER_CURRENT_VALUE0); } +#endif static int __init rk_timer_probe(struct rk_timer *timer, struct device_node *np) @@ -250,6 +256,7 @@ static int __init rk_clkevt_init(struct device_node *np) return ret; } +#ifndef MODULE static int __init rk_clksrc_init(struct device_node *np) { int ret = -EINVAL; @@ -287,14 +294,17 @@ static int __init rk_clksrc_init(struct device_node *np) rk_clksrc = ERR_PTR(ret); return ret; } +#endif static int __init rk_timer_init(struct device_node *np) { if (!rk_clkevt) return rk_clkevt_init(np); +#ifndef MODULE if (!rk_clksrc) return rk_clksrc_init(np); +#endif pr_err("Too many timer definitions for '%s'\n", TIMER_NAME); return -EINVAL; @@ -302,3 +312,26 @@ static int __init rk_timer_init(struct device_node *np) TIMER_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer", rk_timer_init); TIMER_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer", rk_timer_init); + +#ifdef MODULE +static int __init rk_timer_driver_probe(struct platform_device *pdev) +{ + return rk_timer_init(pdev->dev.of_node); +} + +static const struct of_device_id rk_timer_match_table[] = { + { .compatible = "rockchip,rk3288-timer" }, + { .compatible = "rockchip,rk3399-timer" }, + { /* sentinel */ }, +}; + +static struct platform_driver rk_timer_driver = { + .driver = { + .name = TIMER_NAME, + .of_match_table = rk_timer_match_table, + }, +}; +module_platform_driver_probe(rk_timer_driver, rk_timer_driver_probe); + +MODULE_LICENSE("GPL"); +#endif diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 1f73fa75b1a0..0faef5fc6df3 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -158,6 +158,16 @@ config ARM_RASPBERRYPI_CPUFREQ If in doubt, say N. +config ARM_ROCKCHIP_CPUFREQ + tristate "Rockchip CPUfreq driver" + depends on ARCH_ROCKCHIP && CPUFREQ_DT + select PM_OPP + help + This adds the CPUFreq driver support for Rockchip SoCs, + based on cpufreq-dt. + + If in doubt, say N. 
+ config ARM_S3C_CPUFREQ bool help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index f1b7e3dd6e5d..ed260af8be8d 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o # CPUfreq stats obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o -# CPUfreq governors +# CPUfreq governors obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o @@ -64,6 +64,7 @@ obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM) += qcom-cpufreq-nvmem.o obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ) += raspberrypi-cpufreq.o +obj-$(CONFIG_ARM_ROCKCHIP_CPUFREQ) += rockchip-cpufreq.o obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index a3734014db47..0b91e3616a11 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -66,21 +66,6 @@ static const struct of_device_id whitelist[] __initconst = { { .compatible = "renesas,r8a7794", }, { .compatible = "renesas,sh73a0", }, - { .compatible = "rockchip,rk2928", }, - { .compatible = "rockchip,rk3036", }, - { .compatible = "rockchip,rk3066a", }, - { .compatible = "rockchip,rk3066b", }, - { .compatible = "rockchip,rk3188", }, - { .compatible = "rockchip,rk3228", }, - { .compatible = "rockchip,rk3288", }, - { .compatible = "rockchip,rk3328", }, - { .compatible = "rockchip,rk3366", }, - { .compatible = "rockchip,rk3368", }, - { .compatible = "rockchip,rk3399", - .data = &(struct cpufreq_dt_platform_data) - { .have_governor_per_policy = true, }, - }, - { .compatible = "st-ericsson,u8500", }, { .compatible = "st-ericsson,u8540", }, { .compatible = "st-ericsson,u9500", }, @@ -137,6 +122,28 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "qcom,sc7180", }, { .compatible = "qcom,sdm845", }, { .compatible = "qcom,sm8150", }, + { .compatible = "rockchip,px30", }, + { .compatible = "rockchip,rk2928", }, + { .compatible = "rockchip,rk3036", }, + { .compatible = "rockchip,rk3066a", }, + { .compatible = "rockchip,rk3066b", }, + { .compatible = "rockchip,rk3126", }, + { .compatible = "rockchip,rk3128", }, + { .compatible = "rockchip,rk3188", }, + { .compatible = "rockchip,rk3228", }, + { .compatible = "rockchip,rk3229", }, + { .compatible = "rockchip,rk3288", }, + { .compatible = "rockchip,rk3288w", }, + { .compatible = "rockchip,rk3326", }, + { .compatible = "rockchip,rk3328", }, + { .compatible = "rockchip,rk3366", }, + { .compatible = "rockchip,rk3368", }, + { .compatible = "rockchip,rk3399", }, + { .compatible = "rockchip,rk3399pro", }, + { .compatible = "rockchip,rk3566", }, + { .compatible = "rockchip,rk3568", }, + { .compatible = "rockchip,rv1109", }, + { .compatible = "rockchip,rv1126", }, { .compatible = "st,stih407", }, { .compatible = "st,stih410", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index e363ae04aac6..f1327e7fe361 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -23,6 +23,9 @@ #include #include "cpufreq-dt.h" +#ifdef CONFIG_ARCH_ROCKCHIP +#include "rockchip-cpufreq.h" +#endif struct private_data { struct list_head node; @@ -30,7 +33,7 @@ struct 
private_data { cpumask_var_t cpus; struct device *cpu_dev; struct opp_table *opp_table; - struct opp_table *reg_opp_table; + struct cpufreq_frequency_table *freq_table; bool have_static_opps; }; @@ -59,7 +62,11 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index) struct private_data *priv = policy->driver_data; unsigned long freq = policy->freq_table[index].frequency; +#ifdef CONFIG_ARCH_ROCKCHIP + return rockchip_cpufreq_opp_set_rate(priv->cpu_dev, freq * 1000); +#else return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000); +#endif } /* @@ -102,7 +109,6 @@ static const char *find_supply_name(struct device *dev) static int cpufreq_init(struct cpufreq_policy *policy) { - struct cpufreq_frequency_table *freq_table; struct private_data *priv; struct device *cpu_dev; struct clk *cpu_clk; @@ -114,9 +120,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) pr_err("failed to find data for cpu%d\n", policy->cpu); return -ENODEV; } - cpu_dev = priv->cpu_dev; - cpumask_copy(policy->cpus, priv->cpus); cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { @@ -125,67 +129,32 @@ static int cpufreq_init(struct cpufreq_policy *policy) return ret; } - /* - * Initialize OPP tables for all policy->cpus. They will be shared by - * all CPUs which have marked their CPUs shared with OPP bindings. - * - * For platforms not using operating-points-v2 bindings, we do this - * before updating policy->cpus. Otherwise, we will end up creating - * duplicate OPPs for policy->cpus. - * - * OPPs might be populated at runtime, don't check for error here - */ - if (!dev_pm_opp_of_cpumask_add_table(policy->cpus)) - priv->have_static_opps = true; - - /* - * But we need OPP table to function so if it is not there let's - * give platform code chance to provide it for us. 
- */ - ret = dev_pm_opp_get_opp_count(cpu_dev); - if (ret <= 0) { - dev_err(cpu_dev, "OPP table can't be empty\n"); - ret = -ENODEV; - goto out_free_opp; - } - - ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); - if (ret) { - dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); - goto out_free_opp; - } + transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); + if (!transition_latency) + transition_latency = CPUFREQ_ETERNAL; + cpumask_copy(policy->cpus, priv->cpus); policy->driver_data = priv; policy->clk = cpu_clk; - policy->freq_table = freq_table; - + policy->freq_table = priv->freq_table; policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000; + policy->cpuinfo.transition_latency = transition_latency; + policy->dvfs_possible_from_any_cpu = true; /* Support turbo/boost mode */ if (policy_has_boost_freq(policy)) { /* This gets disabled by core on driver unregister */ ret = cpufreq_enable_boost_support(); if (ret) - goto out_free_cpufreq_table; + goto out_clk_put; cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; } - transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); - if (!transition_latency) - transition_latency = CPUFREQ_ETERNAL; - - policy->cpuinfo.transition_latency = transition_latency; - policy->dvfs_possible_from_any_cpu = true; - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); return 0; -out_free_cpufreq_table: - dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); -out_free_opp: - if (priv->have_static_opps) - dev_pm_opp_of_cpumask_remove_table(policy->cpus); +out_clk_put: clk_put(cpu_clk); return ret; @@ -208,11 +177,6 @@ static int cpufreq_offline(struct cpufreq_policy *policy) static int cpufreq_exit(struct cpufreq_policy *policy) { - struct private_data *priv = policy->driver_data; - - dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); - if (priv->have_static_opps) - dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); clk_put(policy->clk); return 0; } @@ -236,6 +200,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) { struct private_data *priv; struct device *cpu_dev; + bool fallback = false; const char *reg_name; int ret; @@ -254,68 +219,91 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL)) return -ENOMEM; + cpumask_set_cpu(cpu, priv->cpus); priv->cpu_dev = cpu_dev; - /* Try to get OPP table early to ensure resources are available */ - priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev); - if (IS_ERR(priv->opp_table)) { - ret = PTR_ERR(priv->opp_table); - if (ret != -EPROBE_DEFER) - dev_err(cpu_dev, "failed to get OPP table: %d\n", ret); - goto free_cpumask; - } - /* * OPP layer will be taking care of regulators now, but it needs to know * the name of the regulator first. 
*/ reg_name = find_supply_name(cpu_dev); if (reg_name) { - priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev, - ®_name, 1); - if (IS_ERR(priv->reg_opp_table)) { - ret = PTR_ERR(priv->reg_opp_table); + priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, ®_name, + 1); + if (IS_ERR(priv->opp_table)) { + ret = PTR_ERR(priv->opp_table); if (ret != -EPROBE_DEFER) dev_err(cpu_dev, "failed to set regulators: %d\n", ret); - goto put_table; + goto free_cpumask; } } - /* Find OPP sharing information so we can fill pri->cpus here */ /* Get OPP-sharing information from "operating-points-v2" bindings */ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus); if (ret) { if (ret != -ENOENT) - goto put_reg; + goto out; /* * operating-points-v2 not supported, fallback to all CPUs share * OPP for backward compatibility if the platform hasn't set * sharing CPUs. */ - if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) { - cpumask_setall(priv->cpus); - - /* - * OPP tables are initialized only for cpu, do it for - * others as well. - */ - ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus); - if (ret) - dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", - __func__, ret); - } + if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) + fallback = true; + } + + /* + * Initialize OPP tables for all priv->cpus. They will be shared by + * all CPUs which have marked their CPUs shared with OPP bindings. + * + * For platforms not using operating-points-v2 bindings, we do this + * before updating priv->cpus. Otherwise, we will end up creating + * duplicate OPPs for the CPUs. + * + * OPPs might be populated at runtime, don't check for error here. + */ + if (!dev_pm_opp_of_cpumask_add_table(priv->cpus)) + priv->have_static_opps = true; + + /* + * The OPP table must be initialized, statically or dynamically, by this + * point. 
+ */ + ret = dev_pm_opp_get_opp_count(cpu_dev); + if (ret <= 0) { + dev_err(cpu_dev, "OPP table can't be empty\n"); + ret = -ENODEV; + goto out; + } + + if (fallback) { + cpumask_setall(priv->cpus); + ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus); + if (ret) + dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", + __func__, ret); + } + +#ifdef CONFIG_ARCH_ROCKCHIP + rockchip_cpufreq_adjust_power_scale(cpu_dev); +#endif + + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table); + if (ret) { + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); + goto out; } list_add(&priv->node, &priv_list); return 0; -put_reg: - if (priv->reg_opp_table) - dev_pm_opp_put_regulators(priv->reg_opp_table); -put_table: - dev_pm_opp_put_opp_table(priv->opp_table); +out: + if (priv->have_static_opps) + dev_pm_opp_of_cpumask_remove_table(priv->cpus); + if (priv->opp_table) + dev_pm_opp_put_regulators(priv->opp_table); free_cpumask: free_cpumask_var(priv->cpus); return ret; @@ -326,9 +314,11 @@ static void dt_cpufreq_release(void) struct private_data *priv, *tmp; list_for_each_entry_safe(priv, tmp, &priv_list, node) { - if (priv->reg_opp_table) - dev_pm_opp_put_regulators(priv->reg_opp_table); - dev_pm_opp_put_opp_table(priv->opp_table); + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table); + if (priv->have_static_opps) + dev_pm_opp_of_cpumask_remove_table(priv->cpus); + if (priv->opp_table) + dev_pm_opp_put_regulators(priv->opp_table); free_cpumask_var(priv->cpus); list_del(&priv->node); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 58342390966b..ae35ef771900 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -688,8 +688,12 @@ static ssize_t show_##file_name \ return sprintf(buf, "%u\n", policy->object); \ } +static ssize_t show_cpuinfo_max_freq(struct cpufreq_policy *policy, char *buf) +{ + unsigned int max_freq = policy->cpuinfo.max_freq; + return sprintf(buf, "%u\n", max_freq); +} show_one(cpuinfo_min_freq, cpuinfo.min_freq); -show_one(cpuinfo_max_freq, cpuinfo.max_freq); show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); show_one(scaling_min_freq, min); show_one(scaling_max_freq, max); @@ -2535,6 +2539,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, return ret; } +EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency_limits); /** * cpufreq_update_policy - Re-evaluate an existing cpufreq policy. 
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 50a4d7846580..1f001d281718 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -78,20 +78,18 @@ static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy) mutex_lock(&userspace_mutex); per_cpu(cpu_is_managed, policy->cpu) = 1; - *setspeed = policy->cur; + if (!*setspeed) + *setspeed = policy->cur; mutex_unlock(&userspace_mutex); return 0; } static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy) { - unsigned int *setspeed = policy->governor_data; - pr_debug("managing cpu %u stopped\n", policy->cpu); mutex_lock(&userspace_mutex); per_cpu(cpu_is_managed, policy->cpu) = 0; - *setspeed = 0; mutex_unlock(&userspace_mutex); } diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 4070e573bf43..557f59ac47a3 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -381,3 +381,4 @@ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx, mutex_unlock(&cpuidle_lock); } +EXPORT_SYMBOL_GPL(cpuidle_driver_state_disabled); diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index 29acaf48e575..0e51ed25665e 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -102,6 +102,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) return ret; } +EXPORT_SYMBOL_GPL(cpuidle_register_governor); /** * cpuidle_governor_latency_req - Compute a latency constraint for CPU @@ -118,3 +119,4 @@ s64 cpuidle_governor_latency_req(unsigned int cpu) return (s64)device_req * NSEC_PER_USEC; } +EXPORT_SYMBOL_GPL(cpuidle_governor_latency_req); diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 37dc40d1fcfb..ab9c00a1b1c3 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -131,15 +131,20 @@ config ARM_TEGRA20_DEVFREQ It reads Memory Controller counters and adjusts the operating frequencies and voltages with OPP support. -config ARM_RK3399_DMC_DEVFREQ - tristate "ARM RK3399 DMC DEVFREQ Driver" +config ARM_ROCKCHIP_BUS_DEVFREQ + tristate "ARM ROCKCHIP BUS DEVFREQ Driver" + depends on ARCH_ROCKCHIP + help + This adds the DEVFREQ driver for the ROCKCHIP BUS. + +config ARM_ROCKCHIP_DMC_DEVFREQ + tristate "ARM ROCKCHIP DMC DEVFREQ Driver" depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \ (COMPILE_TEST && HAVE_ARM_SMCCC) select DEVFREQ_EVENT_ROCKCHIP_DFI - select DEVFREQ_GOV_SIMPLE_ONDEMAND select PM_DEVFREQ_EVENT help - This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller). + This adds the DEVFREQ driver for the ROCKCHIP DMC(Dynamic Memory Controller). It sets the frequency for the memory controller and reads the usage counts from hardware. 
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile index 3ca1ad0ecb97..a5afa549284e 100644 --- a/drivers/devfreq/Makefile +++ b/drivers/devfreq/Makefile @@ -11,9 +11,12 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o -obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o +obj-$(CONFIG_ARM_ROCKCHIP_BUS_DEVFREQ) += rockchip_bus.o +obj-$(CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ) += rockchip_dmc.o rockchip_dmc_common.o obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o obj-$(CONFIG_ARM_TEGRA20_DEVFREQ) += tegra20-devfreq.o # DEVFREQ Event Drivers obj-$(CONFIG_PM_DEVFREQ_EVENT) += event/ + +ccflags-y += -I$(KERNEL_SOURCE_PATH)/drivers/gpu/drm/ diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index c6f460550f5e..db0fe99c8d61 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -1761,6 +1761,40 @@ static ssize_t timer_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RW(timer); +static ssize_t load_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int err; + struct devfreq *devfreq = to_devfreq(dev); + struct devfreq_dev_status stat = devfreq->last_status; + unsigned long freq; + ssize_t len; + + err = devfreq_update_stats(devfreq); + if (err) + return err; + + if (stat.total_time < stat.busy_time) { + err = devfreq_update_stats(devfreq); + if (err) + return err; + } + + if (!stat.total_time) + return 0; + + len = sprintf(buf, "%lu", stat.busy_time * 100 / stat.total_time); + + if (devfreq->profile->get_cur_freq && + !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq)) + len += sprintf(buf + len, "@%luHz\n", freq); + else + len += sprintf(buf + len, "@%luHz\n", devfreq->previous_freq); + + return len; +} +static DEVICE_ATTR_RO(load); + static struct attribute *devfreq_attrs[] = { &dev_attr_name.attr, &dev_attr_governor.attr, @@ -1773,6 +1807,7 @@ static struct attribute *devfreq_attrs[] = { &dev_attr_max_freq.attr, &dev_attr_trans_stat.attr, &dev_attr_timer.attr, + &dev_attr_load.attr, NULL, }; ATTRIBUTE_GROUPS(devfreq); diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig index 878825372f6f..4526c69c602e 100644 --- a/drivers/devfreq/event/Kconfig +++ b/drivers/devfreq/event/Kconfig @@ -39,4 +39,11 @@ config DEVFREQ_EVENT_ROCKCHIP_DFI This add the devfreq-event driver for Rockchip SoC. It provides DFI (DDR Monitor Module) driver to count ddr load. +config DEVFREQ_EVENT_ROCKCHIP_NOCP + tristate "ROCKCHIP NoC (Network On Chip) Probe DEVFREQ event Driver" + depends on ARCH_ROCKCHIP + help + This adds the devfreq-event driver for Rockchip SoC. It provides NoC + (Network on Chip) Probe counters to monitor traffic statistics.
+ endif # PM_DEVFREQ_EVENT diff --git a/drivers/devfreq/event/Makefile b/drivers/devfreq/event/Makefile index 3c847e5d5a35..03d67f06c22e 100644 --- a/drivers/devfreq/event/Makefile +++ b/drivers/devfreq/event/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_NOCP) += exynos-nocp.o obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o obj-$(CONFIG_DEVFREQ_EVENT_ROCKCHIP_DFI) += rockchip-dfi.o +obj-$(CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP) += rockchip-nocp.o diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index 9a88faaf8b27..9fd6a82f79d6 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -20,23 +20,70 @@ #include -#define RK3399_DMC_NUM_CH 2 - +#define PX30_PMUGRF_OS_REG2 0x208 +#define PX30_PMUGRF_OS_REG3 0x20c + +#define RK3128_GRF_SOC_CON0 0x140 +#define RK3128_GRF_OS_REG1 0x1cc +#define RK3128_GRF_DFI_WRNUM 0x220 +#define RK3128_GRF_DFI_RDNUM 0x224 +#define RK3128_GRF_DFI_TIMERVAL 0x22c +#define RK3128_DDR_MONITOR_EN ((1 << (16 + 6)) + (1 << 6)) +#define RK3128_DDR_MONITOR_DISB ((1 << (16 + 6)) + (0 << 6)) + +#define RK3288_PMU_SYS_REG2 0x9c +#define RK3288_GRF_SOC_CON4 0x254 +#define RK3288_GRF_SOC_STATUS(n) (0x280 + (n) * 4) +#define RK3288_DFI_EN (0x30003 << 14) +#define RK3288_DFI_DIS (0x30000 << 14) +#define RK3288_LPDDR_SEL (0x10001 << 13) +#define RK3288_DDR3_SEL (0x10000 << 13) + +#define RK3328_GRF_OS_REG2 0x5d0 + +#define RK3368_GRF_DDRC0_CON0 0x600 +#define RK3368_GRF_SOC_STATUS5 0x494 +#define RK3368_GRF_SOC_STATUS6 0x498 +#define RK3368_GRF_SOC_STATUS8 0x4a0 +#define RK3368_GRF_SOC_STATUS9 0x4a4 +#define RK3368_GRF_SOC_STATUS10 0x4a8 +#define RK3368_DFI_EN (0x30003 << 5) +#define RK3368_DFI_DIS (0x30000 << 5) + +#define MAX_DMC_NUM_CH 2 +#define READ_DRAMTYPE_INFO(n) (((n) >> 13) & 0x7) +#define READ_CH_INFO(n) (((n) >> 28) & 0x3) +#define READ_DRAMTYPE_INFO_V3(n, m) ((((n) >> 13) & 0x7) | ((((m) >> 12) & 0x3) << 3)) +#define READ_SYSREG_VERSION(m) (((m) >> 28) & 0xf) /* DDRMON_CTRL */ -#define DDRMON_CTRL 0x04 -#define CLR_DDRMON_CTRL (0x1f0000 << 0) -#define LPDDR4_EN (0x10001 << 4) -#define HARDWARE_EN (0x10001 << 3) -#define LPDDR3_EN (0x10001 << 2) -#define SOFTWARE_EN (0x10001 << 1) -#define SOFTWARE_DIS (0x10000 << 1) -#define TIME_CNT_EN (0x10001 << 0) +#define DDRMON_CTRL 0x04 +#define CLR_DDRMON_CTRL (0x3f0000 << 0) +#define DDR4_EN (0x10001 << 5) +#define LPDDR4_EN (0x10001 << 4) +#define HARDWARE_EN (0x10001 << 3) +#define LPDDR2_3_EN (0x10001 << 2) +#define SOFTWARE_EN (0x10001 << 1) +#define SOFTWARE_DIS (0x10000 << 1) +#define TIME_CNT_EN (0x10001 << 0) #define DDRMON_CH0_COUNT_NUM 0x28 #define DDRMON_CH0_DFI_ACCESS_NUM 0x2c #define DDRMON_CH1_COUNT_NUM 0x3c #define DDRMON_CH1_DFI_ACCESS_NUM 0x40 +/* pmu grf */ +#define PMUGRF_OS_REG2 0x308 + +enum { + DDR4 = 0, + DDR3 = 3, + LPDDR2 = 5, + LPDDR3 = 6, + LPDDR4 = 7, + LPDDR4X = 8, + UNUSED = 0xFF +}; + struct dmc_usage { u32 access; u32 total; @@ -50,33 +97,261 @@ struct dmc_usage { struct rockchip_dfi { struct devfreq_event_dev *edev; struct devfreq_event_desc *desc; - struct dmc_usage ch_usage[RK3399_DMC_NUM_CH]; + struct dmc_usage ch_usage[MAX_DMC_NUM_CH]; struct device *dev; void __iomem *regs; struct regmap *regmap_pmu; + struct regmap *regmap_grf; + struct regmap *regmap_pmugrf; struct clk *clk; + u32 dram_type; + /* + * available mask, 1: available, 0: not available + * each bit represent a channel + */ + u32 ch_msk; +}; + +static void rk3128_dfi_start_hardware_counter(struct devfreq_event_dev *edev) +{ + struct 
rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + regmap_write(info->regmap_grf, + RK3128_GRF_SOC_CON0, + RK3128_DDR_MONITOR_EN); +} + +static void rk3128_dfi_stop_hardware_counter(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + regmap_write(info->regmap_grf, + RK3128_GRF_SOC_CON0, + RK3128_DDR_MONITOR_DISB); +} + +static int rk3128_dfi_disable(struct devfreq_event_dev *edev) +{ + rk3128_dfi_stop_hardware_counter(edev); + + return 0; +} + +static int rk3128_dfi_enable(struct devfreq_event_dev *edev) +{ + rk3128_dfi_start_hardware_counter(edev); + + return 0; +} + +static int rk3128_dfi_set_event(struct devfreq_event_dev *edev) +{ + return 0; +} + +static int rk3128_dfi_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + unsigned long flags; + u32 dfi_wr, dfi_rd, dfi_timer; + + local_irq_save(flags); + + rk3128_dfi_stop_hardware_counter(edev); + + regmap_read(info->regmap_grf, RK3128_GRF_DFI_WRNUM, &dfi_wr); + regmap_read(info->regmap_grf, RK3128_GRF_DFI_RDNUM, &dfi_rd); + regmap_read(info->regmap_grf, RK3128_GRF_DFI_TIMERVAL, &dfi_timer); + + edata->load_count = (dfi_wr + dfi_rd) * 4; + edata->total_count = dfi_timer; + + rk3128_dfi_start_hardware_counter(edev); + + local_irq_restore(flags); + + return 0; +} + +static const struct devfreq_event_ops rk3128_dfi_ops = { + .disable = rk3128_dfi_disable, + .enable = rk3128_dfi_enable, + .get_event = rk3128_dfi_get_event, + .set_event = rk3128_dfi_set_event, +}; + +static void rk3288_dfi_start_hardware_counter(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + regmap_write(info->regmap_grf, RK3288_GRF_SOC_CON4, RK3288_DFI_EN); +} + +static void rk3288_dfi_stop_hardware_counter(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + regmap_write(info->regmap_grf, RK3288_GRF_SOC_CON4, RK3288_DFI_DIS); +} + +static int rk3288_dfi_disable(struct devfreq_event_dev *edev) +{ + rk3288_dfi_stop_hardware_counter(edev); + + return 0; +} + +static int rk3288_dfi_enable(struct devfreq_event_dev *edev) +{ + rk3288_dfi_start_hardware_counter(edev); + + return 0; +} + +static int rk3288_dfi_set_event(struct devfreq_event_dev *edev) +{ + return 0; +} + +static int rk3288_dfi_get_busier_ch(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + u32 tmp, max = 0; + u32 i, busier_ch = 0; + u32 rd_count, wr_count, total_count; + + rk3288_dfi_stop_hardware_counter(edev); + + /* Find out which channel is busier */ + for (i = 0; i < MAX_DMC_NUM_CH; i++) { + if (!(info->ch_msk & BIT(i))) + continue; + regmap_read(info->regmap_grf, + RK3288_GRF_SOC_STATUS(11 + i * 4), &wr_count); + regmap_read(info->regmap_grf, + RK3288_GRF_SOC_STATUS(12 + i * 4), &rd_count); + regmap_read(info->regmap_grf, + RK3288_GRF_SOC_STATUS(14 + i * 4), &total_count); + info->ch_usage[i].access = (wr_count + rd_count) * 4; + info->ch_usage[i].total = total_count; + tmp = info->ch_usage[i].access; + if (tmp > max) { + busier_ch = i; + max = tmp; + } + } + rk3288_dfi_start_hardware_counter(edev); + + return busier_ch; +} + +static int rk3288_dfi_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + int busier_ch; + unsigned long flags; + + local_irq_save(flags); + busier_ch = rk3288_dfi_get_busier_ch(edev); + 
local_irq_restore(flags); + + edata->load_count = info->ch_usage[busier_ch].access; + edata->total_count = info->ch_usage[busier_ch].total; + + return 0; +} + +static const struct devfreq_event_ops rk3288_dfi_ops = { + .disable = rk3288_dfi_disable, + .enable = rk3288_dfi_enable, + .get_event = rk3288_dfi_get_event, + .set_event = rk3288_dfi_set_event, +}; + +static void rk3368_dfi_start_hardware_counter(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + regmap_write(info->regmap_grf, RK3368_GRF_DDRC0_CON0, RK3368_DFI_EN); +} + +static void rk3368_dfi_stop_hardware_counter(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + regmap_write(info->regmap_grf, RK3368_GRF_DDRC0_CON0, RK3368_DFI_DIS); +} + +static int rk3368_dfi_disable(struct devfreq_event_dev *edev) +{ + rk3368_dfi_stop_hardware_counter(edev); + + return 0; +} + +static int rk3368_dfi_enable(struct devfreq_event_dev *edev) +{ + rk3368_dfi_start_hardware_counter(edev); + + return 0; +} + +static int rk3368_dfi_set_event(struct devfreq_event_dev *edev) +{ + return 0; +} + +static int rk3368_dfi_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + unsigned long flags; + u32 dfi0_wr, dfi0_rd, dfi1_wr, dfi1_rd, dfi_timer; + + local_irq_save(flags); + + rk3368_dfi_stop_hardware_counter(edev); + + regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS5, &dfi0_wr); + regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS6, &dfi0_rd); + regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS9, &dfi1_wr); + regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS10, &dfi1_rd); + regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS8, &dfi_timer); + + edata->load_count = (dfi0_wr + dfi0_rd + dfi1_wr + dfi1_rd) * 2; + edata->total_count = dfi_timer; + + rk3368_dfi_start_hardware_counter(edev); + + local_irq_restore(flags); + + return 0; +} + +static const struct devfreq_event_ops rk3368_dfi_ops = { + .disable = rk3368_dfi_disable, + .enable = rk3368_dfi_enable, + .get_event = rk3368_dfi_get_event, + .set_event = rk3368_dfi_set_event, }; static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev) { struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); void __iomem *dfi_regs = info->regs; - u32 val; - u32 ddr_type; - - /* get ddr type */ - regmap_read(info->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val); - ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) & - RK3399_PMUGRF_DDRTYPE_MASK; /* clear DDRMON_CTRL setting */ writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL); /* set ddr type to dfi */ - if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR3) - writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL); - else if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4) + if (info->dram_type == LPDDR3 || info->dram_type == LPDDR2) + writel_relaxed(LPDDR2_3_EN, dfi_regs + DDRMON_CTRL); + else if (info->dram_type == LPDDR4 || info->dram_type == LPDDR4X) writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL); + else if (info->dram_type == DDR4) + writel_relaxed(DDR4_EN, dfi_regs + DDRMON_CTRL); /* enable count, use software mode */ writel_relaxed(SOFTWARE_EN, dfi_regs + DDRMON_CTRL); @@ -100,12 +375,22 @@ static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev) rockchip_dfi_stop_hardware_counter(edev); /* Find out which channel is busier */ - for (i = 0; i < RK3399_DMC_NUM_CH; i++) { - info->ch_usage[i].access = readl_relaxed(dfi_regs + - DDRMON_CH0_DFI_ACCESS_NUM 
+ i * 20) * 4; + for (i = 0; i < MAX_DMC_NUM_CH; i++) { + if (!(info->ch_msk & BIT(i))) + continue; + info->ch_usage[i].total = readl_relaxed(dfi_regs + DDRMON_CH0_COUNT_NUM + i * 20); - tmp = info->ch_usage[i].access; + + /* LPDDR4 and LPDDR4X BL = 16,other DDR type BL = 8 */ + tmp = readl_relaxed(dfi_regs + + DDRMON_CH0_DFI_ACCESS_NUM + i * 20); + if (info->dram_type == LPDDR4 || info->dram_type == LPDDR4X) + tmp *= 8; + else + tmp *= 4; + info->ch_usage[i].access = tmp; + if (tmp > max) { busier_ch = i; max = tmp; @@ -121,7 +406,8 @@ static int rockchip_dfi_disable(struct devfreq_event_dev *edev) struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); rockchip_dfi_stop_hardware_counter(edev); - clk_disable_unprepare(info->clk); + if (info->clk) + clk_disable_unprepare(info->clk); return 0; } @@ -131,10 +417,13 @@ static int rockchip_dfi_enable(struct devfreq_event_dev *edev) struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); int ret; - ret = clk_prepare_enable(info->clk); - if (ret) { - dev_err(&edev->dev, "failed to enable dfi clk: %d\n", ret); - return ret; + if (info->clk) { + ret = clk_prepare_enable(info->clk); + if (ret) { + dev_err(&edev->dev, "failed to enable dfi clk: %d\n", + ret); + return ret; + } } rockchip_dfi_start_hardware_counter(edev); @@ -151,8 +440,11 @@ static int rockchip_dfi_get_event(struct devfreq_event_dev *edev, { struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); int busier_ch; + unsigned long flags; + local_irq_save(flags); busier_ch = rockchip_dfi_get_busier_ch(edev); + local_irq_restore(flags); edata->load_count = info->ch_usage[busier_ch].access; edata->total_count = info->ch_usage[busier_ch].total; @@ -167,22 +459,120 @@ static const struct devfreq_event_ops rockchip_dfi_ops = { .set_event = rockchip_dfi_set_event, }; -static const struct of_device_id rockchip_dfi_id_match[] = { - { .compatible = "rockchip,rk3399-dfi" }, - { }, -}; -MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match); +static __init int px30_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct device_node *np = pdev->dev.of_node, *node; + struct resource *res; + u32 val_2, val_3; -static int rockchip_dfi_probe(struct platform_device *pdev) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->regs)) + return PTR_ERR(data->regs); + + node = of_parse_phandle(np, "rockchip,pmugrf", 0); + if (node) { + data->regmap_pmugrf = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_pmugrf)) + return PTR_ERR(data->regmap_pmugrf); + } + + regmap_read(data->regmap_pmugrf, PX30_PMUGRF_OS_REG2, &val_2); + regmap_read(data->regmap_pmugrf, PX30_PMUGRF_OS_REG3, &val_3); + if (READ_SYSREG_VERSION(val_3) >= 0x3) + data->dram_type = READ_DRAMTYPE_INFO_V3(val_2, val_3); + else + data->dram_type = READ_DRAMTYPE_INFO(val_2); + data->ch_msk = 1; + data->clk = NULL; + + desc->ops = &rockchip_dfi_ops; + + return 0; +} + +static __init int rk3128_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) { - struct device *dev = &pdev->dev; - struct rockchip_dfi *data; - struct devfreq_event_desc *desc; struct device_node *np = pdev->dev.of_node, *node; - data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL); - if (!data) - return -ENOMEM; + node = of_parse_phandle(np, "rockchip,grf", 0); + if (node) { + data->regmap_grf = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_grf)) + return 
PTR_ERR(data->regmap_grf); + } + + desc->ops = &rk3128_dfi_ops; + + return 0; +} + +static __init int rk3288_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct device_node *np = pdev->dev.of_node, *node; + u32 val; + + node = of_parse_phandle(np, "rockchip,pmu", 0); + if (node) { + data->regmap_pmu = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_pmu)) + return PTR_ERR(data->regmap_pmu); + } + + node = of_parse_phandle(np, "rockchip,grf", 0); + if (node) { + data->regmap_grf = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_grf)) + return PTR_ERR(data->regmap_grf); + } + + regmap_read(data->regmap_pmu, RK3288_PMU_SYS_REG2, &val); + data->dram_type = READ_DRAMTYPE_INFO(val); + data->ch_msk = READ_CH_INFO(val); + + if (data->dram_type == DDR3) + regmap_write(data->regmap_grf, RK3288_GRF_SOC_CON4, + RK3288_DDR3_SEL); + else + regmap_write(data->regmap_grf, RK3288_GRF_SOC_CON4, + RK3288_LPDDR_SEL); + + desc->ops = &rk3288_dfi_ops; + + return 0; +} + +static __init int rk3368_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct device *dev = &pdev->dev; + + if (!dev->parent || !dev->parent->of_node) + return -EINVAL; + + data->regmap_grf = syscon_node_to_regmap(dev->parent->of_node); + if (IS_ERR(data->regmap_grf)) + return PTR_ERR(data->regmap_grf); + + desc->ops = &rk3368_dfi_ops; + + return 0; +} + +static __init int rockchip_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node, *node; + u32 val; data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) @@ -202,23 +592,100 @@ static int rockchip_dfi_probe(struct platform_device *pdev) if (IS_ERR(data->regmap_pmu)) return PTR_ERR(data->regmap_pmu); } - data->dev = dev; + + regmap_read(data->regmap_pmu, PMUGRF_OS_REG2, &val); + data->dram_type = READ_DRAMTYPE_INFO(val); + data->ch_msk = READ_CH_INFO(val); + + desc->ops = &rockchip_dfi_ops; + + return 0; +} + +static __init int rk3328_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct device_node *np = pdev->dev.of_node, *node; + struct resource *res; + u32 val; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->regs)) + return PTR_ERR(data->regs); + + node = of_parse_phandle(np, "rockchip,grf", 0); + if (node) { + data->regmap_grf = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_grf)) + return PTR_ERR(data->regmap_grf); + } + + regmap_read(data->regmap_grf, RK3328_GRF_OS_REG2, &val); + data->dram_type = READ_DRAMTYPE_INFO(val); + data->ch_msk = 1; + data->clk = NULL; + + desc->ops = &rockchip_dfi_ops; + + return 0; +} + +static const struct of_device_id rockchip_dfi_id_match[] = { + { .compatible = "rockchip,px30-dfi", .data = px30_dfi_init }, + { .compatible = "rockchip,rk1808-dfi", .data = px30_dfi_init }, + { .compatible = "rockchip,rk3128-dfi", .data = rk3128_dfi_init }, + { .compatible = "rockchip,rk3288-dfi", .data = rk3288_dfi_init }, + { .compatible = "rockchip,rk3328-dfi", .data = rk3328_dfi_init }, + { .compatible = "rockchip,rk3368-dfi", .data = rk3368_dfi_init }, + { .compatible = "rockchip,rk3399-dfi", .data = rockchip_dfi_init }, + { .compatible = "rockchip,rk3568-dfi", .data = px30_dfi_init }, + { .compatible = "rockchip,rv1126-dfi", .data 
= px30_dfi_init }, + { }, +}; + +static int rockchip_dfi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_dfi *data; + struct devfreq_event_desc *desc; + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *match; + int (*init)(struct platform_device *pdev, struct rockchip_dfi *data, + struct devfreq_event_desc *desc); + + data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL); + if (!data) + return -ENOMEM; desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) return -ENOMEM; - desc->ops = &rockchip_dfi_ops; + match = of_match_node(rockchip_dfi_id_match, pdev->dev.of_node); + if (match) { + init = match->data; + if (init) { + if (init(pdev, data, desc)) + return -EINVAL; + } else { + return 0; + } + } else { + return 0; + } + desc->driver_data = data; desc->name = np->name; - data->desc = desc; - data->edev = devm_devfreq_event_add_edev(&pdev->dev, desc); + data->edev = devm_devfreq_event_add_edev(dev, desc); if (IS_ERR(data->edev)) { - dev_err(&pdev->dev, - "failed to add devfreq-event device\n"); + dev_err(dev, "failed to add devfreq-event device\n"); return PTR_ERR(data->edev); } + data->desc = desc; + data->dev = &pdev->dev; platform_set_drvdata(pdev, data); diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index 594b77d8923c..366c8aeffbb2 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -21,7 +21,6 @@ config SW_SYNC bool "Sync File Validation Framework" default n depends on SYNC_FILE - depends on DEBUG_FS help A sync object driver that uses a 32bit counter to coordinate synchronization. Useful when there is no hardware primitive backing @@ -80,7 +79,7 @@ menuconfig DMABUF_HEAPS menuconfig DMABUF_SYSFS_STATS bool "DMA-BUF sysfs statistics" - depends on DMA_SHARED_BUFFER + select DMA_SHARED_BUFFER help Choose this option to enable DMA-BUF sysfs statistics in location /sys/kernel/dmabuf/buffers. 
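Stepping back to the reworked rockchip-dfi driver above before the dma-buf hunks begin: its get_event hooks only publish raw counters (load_count = scaled DFI access count for the busier channel, total_count = the sampling window). A minimal consumer-side sketch, assuming a DMC devfreq driver that already holds the event device, could turn those counters into a load percentage; devfreq_event_get_event() is the stock devfreq-event API, while dmc_get_ddr_load() is purely illustrative and not part of this patch:

#include <linux/devfreq-event.h>
#include <linux/math64.h>

/* Hypothetical consumer: convert the DFI counters into a load percentage. */
static int dmc_get_ddr_load(struct devfreq_event_dev *edev,
			    unsigned long *load_pct)
{
	struct devfreq_event_data edata;
	int ret;

	/* Fills load_count/total_count via the ops registered above */
	ret = devfreq_event_get_event(edev, &edata);
	if (ret)
		return ret;

	if (!edata.total_count)
		return -EINVAL;

	*load_pct = div64_u64((u64)edata.load_count * 100, edata.total_count);
	return 0;
}

Any windowing or averaging on top of this is the consumer's business; the event driver above deliberately stays a thin counter reader.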
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h index a49c6e2650cc..5f4703249117 100644 --- a/drivers/dma-buf/dma-buf-sysfs-stats.h +++ b/drivers/dma-buf/dma-buf-sysfs-stats.h @@ -14,8 +14,23 @@ int dma_buf_init_sysfs_statistics(void); void dma_buf_uninit_sysfs_statistics(void); int dma_buf_stats_setup(struct dma_buf *dmabuf); +int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, + unsigned int uid); +static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach, + int delta) +{ + struct dma_buf_attach_sysfs_entry *entry = attach->sysfs_entry; + entry->map_counter += delta; +} void dma_buf_stats_teardown(struct dma_buf *dmabuf); +void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach); +static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf) +{ + struct dma_buf_sysfs_entry *entry = dmabuf->sysfs_entry; + + return entry->attachment_uid++; +} #else static inline int dma_buf_init_sysfs_statistics(void) @@ -29,7 +44,19 @@ static inline int dma_buf_stats_setup(struct dma_buf *dmabuf) { return 0; } +static inline int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, + unsigned int uid) +{ + return 0; +} static inline void dma_buf_stats_teardown(struct dma_buf *dmabuf) {} +static inline void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach) {} +static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach, + int delta) {} +static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf) +{ + return 0; +} #endif #endif // _DMA_BUF_SYSFS_STATS_H diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 89c10136b26f..3ef87860877a 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -32,8 +32,6 @@ #include "dma-buf-sysfs-stats.h" #include "dma-buf-process-info.h" -static inline int is_dma_buf_file(struct file *); - struct dma_buf_list { struct list_head head; struct mutex lock; @@ -41,6 +39,30 @@ struct dma_buf_list { static struct dma_buf_list db_list; +/* + * This function helps in traversing the db_list and calls the + * callback function which can extract required info out of each + * dmabuf. 
+ */ +int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf, + void *private), void *private) +{ + struct dma_buf *buf; + int ret = mutex_lock_interruptible(&db_list.lock); + + if (ret) + return ret; + + list_for_each_entry(buf, &db_list.head, list_node) { + ret = callback(buf, private); + if (ret) + break; + } + mutex_unlock(&db_list.lock); + return ret; +} +EXPORT_SYMBOL_GPL(get_each_dmabuf); + static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) { struct dma_buf *dmabuf; @@ -129,6 +151,54 @@ static struct file_system_type dma_buf_fs_type = { .kill_sb = kill_anon_super, }; +#ifdef CONFIG_DMABUF_SYSFS_STATS +static void dma_buf_vma_open(struct vm_area_struct *vma) +{ + struct dma_buf *dmabuf = vma->vm_file->private_data; + + dmabuf->mmap_count++; + /* call the heap provided vma open() op */ + if (dmabuf->exp_vm_ops->open) + dmabuf->exp_vm_ops->open(vma); +} + +static void dma_buf_vma_close(struct vm_area_struct *vma) +{ + struct dma_buf *dmabuf = vma->vm_file->private_data; + + if (dmabuf->mmap_count) + dmabuf->mmap_count--; + /* call the heap provided vma close() op */ + if (dmabuf->exp_vm_ops->close) + dmabuf->exp_vm_ops->close(vma); +} + +static int dma_buf_do_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + /* call this first because the exporter might override vma->vm_ops */ + int ret = dmabuf->ops->mmap(dmabuf, vma); + + if (ret) + return ret; + + /* save the exporter provided vm_ops */ + dmabuf->exp_vm_ops = vma->vm_ops; + dmabuf->vm_ops = *(dmabuf->exp_vm_ops); + /* override open() and close() to provide buffer mmap count */ + dmabuf->vm_ops.open = dma_buf_vma_open; + dmabuf->vm_ops.close = dma_buf_vma_close; + vma->vm_ops = &dmabuf->vm_ops; + dmabuf->mmap_count++; + + return ret; +} +#else /* CONFIG_DMABUF_SYSFS_STATS */ +static int dma_buf_do_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + return dmabuf->ops->mmap(dmabuf, vma); +} +#endif /* CONFIG_DMABUF_SYSFS_STATS */ + static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) { struct dma_buf *dmabuf; @@ -147,7 +217,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) dmabuf->size >> PAGE_SHIFT) return -EINVAL; - return dmabuf->ops->mmap(dmabuf, vma); + return dma_buf_do_mmap(dmabuf, vma); } static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) @@ -442,10 +512,11 @@ static const struct file_operations dma_buf_fops = { /* * is_dma_buf_file - Check if struct file* is associated with dma_buf */ -static inline int is_dma_buf_file(struct file *file) +int is_dma_buf_file(struct file *file) { return file->f_op == &dma_buf_fops; } +EXPORT_SYMBOL_GPL(is_dma_buf_file); static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags) { @@ -1132,6 +1203,30 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, } EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); +int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf, + enum dma_data_direction direction, + unsigned int offset, unsigned int len) +{ + int ret = 0; + + if (WARN_ON(!dmabuf)) + return -EINVAL; + + if (dmabuf->ops->begin_cpu_access_partial) + ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction, + offset, len); + + /* Ensure that all fences are waited upon - but we first allow + * the native handler the chance to do so more efficiently if it + * chooses. A double invocation here will be reasonably cheap no-op. 
+ */ + if (ret == 0) + ret = __dma_buf_begin_cpu_access(dmabuf, direction); + + return ret; +} +EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial); + /** * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific @@ -1158,6 +1253,21 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf, } EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); +int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf, + enum dma_data_direction direction, + unsigned int offset, unsigned int len) +{ + int ret = 0; + + WARN_ON(!dmabuf); + + if (dmabuf->ops->end_cpu_access_partial) + ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction, + offset, len); + + return ret; +} +EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial); /** * dma_buf_mmap - Setup up a userspace mmap with the given vma @@ -1286,6 +1396,32 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) } EXPORT_SYMBOL_GPL(dma_buf_vunmap); +int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags) +{ + int ret = 0; + + if (WARN_ON(!dmabuf) || !flags) + return -EINVAL; + + if (dmabuf->ops->get_flags) + ret = dmabuf->ops->get_flags(dmabuf, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(dma_buf_get_flags); + +int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid) +{ + if (WARN_ON(!dmabuf) || !uuid) + return -EINVAL; + + if (!dmabuf->ops->get_uuid) + return -ENODEV; + + return dmabuf->ops->get_uuid(dmabuf, uuid); +} +EXPORT_SYMBOL_GPL(dma_buf_get_uuid); + #ifdef CONFIG_DEBUG_FS static int dma_buf_debug_show(struct seq_file *s, void *unused) { diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 7475e09b0680..d64fc03929be 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -312,22 +312,25 @@ void __dma_fence_might_wait(void) /** - * dma_fence_signal_locked - signal completion of a fence + * dma_fence_signal_timestamp_locked - signal completion of a fence * @fence: the fence to signal + * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will - * only be effective the first time. + * only be effective the first time. Set the timestamp provided as the fence + * signal timestamp. * - * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock - * held. + * Unlike dma_fence_signal_timestamp(), this function must be called with + * &dma_fence.lock held. * * Returns 0 on success and a negative error value when @fence has been * signalled already. 
*/ -int dma_fence_signal_locked(struct dma_fence *fence) +int dma_fence_signal_timestamp_locked(struct dma_fence *fence, + ktime_t timestamp) { struct dma_fence_cb *cur, *tmp; struct list_head cb_list; @@ -341,7 +344,7 @@ int dma_fence_signal_locked(struct dma_fence *fence) /* Stash the cb_list before replacing it with the timestamp */ list_replace(&fence->cb_list, &cb_list); - fence->timestamp = ktime_get(); + fence->timestamp = timestamp; set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); trace_dma_fence_signaled(fence); @@ -352,6 +355,59 @@ int dma_fence_signal_locked(struct dma_fence *fence) return 0; } +EXPORT_SYMBOL(dma_fence_signal_timestamp_locked); + +/** + * dma_fence_signal_timestamp - signal completion of a fence + * @fence: the fence to signal + * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain + * + * Signal completion for software callbacks on a fence, this will unblock + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence + * can only go from the unsignaled to the signaled state and not back, it will + * only be effective the first time. Set the timestamp provided as the fence + * signal timestamp. + * + * Returns 0 on success and a negative error value when @fence has been + * signalled already. + */ +int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp) +{ + unsigned long flags; + int ret; + + if (!fence) + return -EINVAL; + + spin_lock_irqsave(fence->lock, flags); + ret = dma_fence_signal_timestamp_locked(fence, timestamp); + spin_unlock_irqrestore(fence->lock, flags); + + return ret; +} +EXPORT_SYMBOL(dma_fence_signal_timestamp); + +/** + * dma_fence_signal_locked - signal completion of a fence + * @fence: the fence to signal + * + * Signal completion for software callbacks on a fence, this will unblock + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence + * can only go from the unsignaled to the signaled state and not back, it will + * only be effective the first time. + * + * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock + * held. + * + * Returns 0 on success and a negative error value when @fence has been + * signalled already. + */ +int dma_fence_signal_locked(struct dma_fence *fence) +{ + return dma_fence_signal_timestamp_locked(fence, ktime_get()); +} EXPORT_SYMBOL(dma_fence_signal_locked); /** @@ -379,7 +435,7 @@ int dma_fence_signal(struct dma_fence *fence) tmp = dma_fence_begin_signalling(); spin_lock_irqsave(fence->lock, flags); - ret = dma_fence_signal_locked(fence); + ret = dma_fence_signal_timestamp_locked(fence, ktime_get()); spin_unlock_irqrestore(fence->lock, flags); dma_fence_end_signalling(tmp); diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index bbbfa28b2f6c..6c22995616a2 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -31,6 +31,7 @@ * @heap_devt heap device node * @list list head connecting to list of heaps * @heap_cdev heap char device + * @heap_dev heap device struct * * Represents a heap of memory from which buffers can be made. 
*/ @@ -41,6 +42,8 @@ struct dma_heap { dev_t heap_devt; struct list_head list; struct cdev heap_cdev; + struct kref refcount; + struct device *heap_dev; }; static LIST_HEAD(heap_list); @@ -49,20 +52,72 @@ static dev_t dma_heap_devt; static struct class *dma_heap_class; static DEFINE_XARRAY_ALLOC(dma_heap_minors); -static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, - unsigned int fd_flags, - unsigned int heap_flags) +struct dma_heap *dma_heap_find(const char *name) { + struct dma_heap *h; + + mutex_lock(&heap_list_lock); + list_for_each_entry(h, &heap_list, list) { + if (!strcmp(h->name, name)) { + kref_get(&h->refcount); + mutex_unlock(&heap_list_lock); + return h; + } + } + mutex_unlock(&heap_list_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(dma_heap_find); + + +void dma_heap_buffer_free(struct dma_buf *dmabuf) +{ + dma_buf_put(dmabuf); +} +EXPORT_SYMBOL_GPL(dma_heap_buffer_free); + +struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, + unsigned int fd_flags, + unsigned int heap_flags) +{ + if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS) + return ERR_PTR(-EINVAL); + + if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS) + return ERR_PTR(-EINVAL); /* * Allocations from all heaps have to begin * and end on page boundaries. */ len = PAGE_ALIGN(len); if (!len) - return -EINVAL; + return ERR_PTR(-EINVAL); return heap->ops->allocate(heap, len, fd_flags, heap_flags); } +EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc); + +int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len, + unsigned int fd_flags, + unsigned int heap_flags) +{ + struct dma_buf *dmabuf; + int fd; + + dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags); + + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + fd = dma_buf_fd(dmabuf, fd_flags); + if (fd < 0) { + dma_buf_put(dmabuf); + /* just return, as put will call release and that will free */ + } + return fd; + +} +EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc); static int dma_heap_open(struct inode *inode, struct file *file) { @@ -90,15 +145,9 @@ static long dma_heap_ioctl_allocate(struct file *file, void *data) if (heap_allocation->fd) return -EINVAL; - if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS) - return -EINVAL; - - if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS) - return -EINVAL; - - fd = dma_heap_buffer_alloc(heap, heap_allocation->len, - heap_allocation->fd_flags, - heap_allocation->heap_flags); + fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len, + heap_allocation->fd_flags, + heap_allocation->heap_flags); if (fd < 0) return fd; @@ -191,6 +240,47 @@ void *dma_heap_get_drvdata(struct dma_heap *heap) { return heap->priv; } +EXPORT_SYMBOL_GPL(dma_heap_get_drvdata); + +static void dma_heap_release(struct kref *ref) +{ + struct dma_heap *heap = container_of(ref, struct dma_heap, refcount); + int minor = MINOR(heap->heap_devt); + + /* Note, we already holding the heap_list_lock here */ + list_del(&heap->list); + + device_destroy(dma_heap_class, heap->heap_devt); + cdev_del(&heap->heap_cdev); + xa_erase(&dma_heap_minors, minor); + + kfree(heap); +} + +void dma_heap_put(struct dma_heap *h) +{ + /* + * Take the heap_list_lock now to avoid racing with code + * scanning the list and then taking a kref. + */ + mutex_lock(&heap_list_lock); + kref_put(&h->refcount, dma_heap_release); + mutex_unlock(&heap_list_lock); +} +EXPORT_SYMBOL_GPL(dma_heap_put); + +/** + * dma_heap_get_dev() - get device struct for the heap + * @heap: DMA-Heap to retrieve device struct from + * + * Returns: + * The device struct for the heap. 
+ */ +struct device *dma_heap_get_dev(struct dma_heap *heap) +{ + return heap->heap_dev; +} +EXPORT_SYMBOL_GPL(dma_heap_get_dev); /** * dma_heap_get_name() - get heap name @@ -203,11 +293,11 @@ const char *dma_heap_get_name(struct dma_heap *heap) { return heap->name; } +EXPORT_SYMBOL_GPL(dma_heap_get_name); struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) { - struct dma_heap *heap, *h, *err_ret; - struct device *dev_ret; + struct dma_heap *heap, *err_ret; unsigned int minor; int ret; @@ -221,10 +311,18 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) return ERR_PTR(-EINVAL); } + heap = dma_heap_find(exp_info->name); + if (heap) { + pr_err("dma_heap: Already registered heap named %s\n", + exp_info->name); + dma_heap_put(heap); + return ERR_PTR(-EINVAL); + } heap = kzalloc(sizeof(*heap), GFP_KERNEL); if (!heap) return ERR_PTR(-ENOMEM); + kref_init(&heap->refcount); heap->name = exp_info->name; heap->ops = exp_info->ops; heap->priv = exp_info->priv; @@ -249,28 +347,20 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) goto err1; } - dev_ret = device_create(dma_heap_class, - NULL, - heap->heap_devt, - NULL, - heap->name); - if (IS_ERR(dev_ret)) { + heap->heap_dev = device_create(dma_heap_class, + NULL, + heap->heap_devt, + NULL, + heap->name); + if (IS_ERR(heap->heap_dev)) { pr_err("dma_heap: Unable to create device\n"); - err_ret = ERR_CAST(dev_ret); + err_ret = ERR_CAST(heap->heap_dev); goto err2; } - mutex_lock(&heap_list_lock); - /* check the name is unique */ - list_for_each_entry(h, &heap_list, list) { - if (!strcmp(h->name, exp_info->name)) { - mutex_unlock(&heap_list_lock); - pr_err("dma_heap: Already registered heap named %s\n", - exp_info->name); - err_ret = ERR_PTR(-EINVAL); - goto err3; - } - } + /* Make sure it doesn't disappear on us */ + heap->heap_dev = get_device(heap->heap_dev); + /* Add heap to the list */ list_add(&heap->list, &heap_list); @@ -288,27 +378,88 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) kfree(heap); return err_ret; } +EXPORT_SYMBOL_GPL(dma_heap_add); static char *dma_heap_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev)); } +static ssize_t total_pools_kb_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct dma_heap *heap; + u64 total_pool_size = 0; + + mutex_lock(&heap_list_lock); + list_for_each_entry(heap, &heap_list, list) { + if (heap->ops->get_pool_size) + total_pool_size += heap->ops->get_pool_size(heap); + } + mutex_unlock(&heap_list_lock); + + return sysfs_emit(buf, "%llu\n", total_pool_size / 1024); +} + +static struct kobj_attribute total_pools_kb_attr = + __ATTR_RO(total_pools_kb); + +static struct attribute *dma_heap_sysfs_attrs[] = { + &total_pools_kb_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(dma_heap_sysfs); + +static struct kobject *dma_heap_kobject; + +static int dma_heap_sysfs_setup(void) +{ + int ret; + + dma_heap_kobject = kobject_create_and_add("dma_heap", kernel_kobj); + if (!dma_heap_kobject) + return -ENOMEM; + + ret = sysfs_create_groups(dma_heap_kobject, dma_heap_sysfs_groups); + if (ret) { + kobject_put(dma_heap_kobject); + return ret; + } + + return 0; +} + +static void dma_heap_sysfs_teardown(void) +{ + kobject_put(dma_heap_kobject); +} + static int dma_heap_init(void) { int ret; - ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME); + ret = dma_heap_sysfs_setup(); if (ret) return ret; + ret = 
alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME); + if (ret) + goto err_chrdev; + dma_heap_class = class_create(THIS_MODULE, DEVNAME); if (IS_ERR(dma_heap_class)) { - unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS); - return PTR_ERR(dma_heap_class); + ret = PTR_ERR(dma_heap_class); + goto err_class; } dma_heap_class->devnode = dma_heap_devnode; return 0; + +err_class: + unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS); +err_chrdev: + dma_heap_sysfs_teardown(); + return ret; } subsys_initcall(dma_heap_init); diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index a5eef06c4226..ff52efa83f39 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -1,12 +1,22 @@ +menuconfig DMABUF_HEAPS_DEFERRED_FREE + bool "DMA-BUF heaps deferred-free library" + help + Choose this option to enable the DMA-BUF heaps deferred-free library. + +menuconfig DMABUF_HEAPS_PAGE_POOL + bool "DMA-BUF heaps page-pool library" + help + Choose this option to enable the DMA-BUF heaps page-pool library. + config DMABUF_HEAPS_SYSTEM - bool "DMA-BUF System Heap" - depends on DMABUF_HEAPS + tristate "DMA-BUF System Heap" + depends on DMABUF_HEAPS && DMABUF_HEAPS_DEFERRED_FREE && DMABUF_HEAPS_PAGE_POOL help Choose this option to enable the system dmabuf heap. The system heap is backed by pages from the buddy allocator. If in doubt, say Y. config DMABUF_HEAPS_CMA - bool "DMA-BUF CMA Heap" + tristate "DMA-BUF CMA Heap" depends on DMABUF_HEAPS && DMA_CMA help Choose this option to enable dma-buf CMA heap. This heap is backed diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile index 6e54cdec3da0..4d4cd94a3a4a 100644 --- a/drivers/dma-buf/heaps/Makefile +++ b/drivers/dma-buf/heaps/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += heap-helpers.o +obj-$(CONFIG_DMABUF_HEAPS_DEFERRED_FREE) += deferred-free-helper.o +obj-$(CONFIG_DMABUF_HEAPS_PAGE_POOL) += page_pool.o obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index e55384dc115b..4931578df815 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -2,76 +2,304 @@ /* * DMABUF CMA heap exporter * - * Copyright (C) 2012, 2019 Linaro Ltd. + * Copyright (C) 2012, 2019, 2020 Linaro Ltd. * Author: for ST-Ericsson. + * + * Also utilizing parts of Andrew Davis' SRAM heap: + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. 
Davis */ - #include -#include #include #include #include #include -#include #include +#include +#include #include -#include #include -#include +#include +#include -#include "heap-helpers.h" struct cma_heap { struct dma_heap *heap; struct cma *cma; }; -static void cma_heap_free(struct heap_helper_buffer *buffer) +struct cma_heap_buffer { + struct cma_heap *heap; + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct page *cma_pages; + struct page **pages; + pgoff_t pagecount; + int vmap_cnt; + void *vaddr; +}; + +struct dma_heap_attachment { + struct device *dev; + struct sg_table table; + struct list_head list; + bool mapped; +}; + +static int cma_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + int ret; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + ret = sg_alloc_table_from_pages(&a->table, buffer->pages, + buffer->pagecount, 0, + buffer->pagecount << PAGE_SHIFT, + GFP_KERNEL); + if (ret) { + kfree(a); + return ret; + } + + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void cma_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(&a->table); + kfree(a); +} + +static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) { - struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap); - unsigned long nr_pages = buffer->pagecount; - struct page *cma_pages = buffer->priv_virt; + struct dma_heap_attachment *a = attachment->priv; + struct sg_table *table = &a->table; + int ret; + + ret = dma_map_sgtable(attachment->dev, table, direction, 0); + if (ret) + return ERR_PTR(-ENOMEM); + a->mapped = true; + return table; +} + +static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, 0); +} + +static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + mutex_lock(&buffer->lock); + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_cpu(a->dev, &a->table, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + mutex_lock(&buffer->lock); + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_device(a->dev, &a->table, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf) +{ + struct 
vm_area_struct *vma = vmf->vma; + struct cma_heap_buffer *buffer = vma->vm_private_data; + + if (vmf->pgoff > buffer->pagecount) + return VM_FAULT_SIGBUS; + + vmf->page = buffer->pages[vmf->pgoff]; + get_page(vmf->page); + + return 0; +} + +static const struct vm_operations_struct dma_heap_vm_ops = { + .fault = cma_heap_vm_fault, +}; + +static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + + if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) + return -EINVAL; + + vma->vm_ops = &dma_heap_vm_ops; + vma->vm_private_data = buffer; + + return 0; +} + +static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) +{ + void *vaddr; + + vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static void *cma_heap_vmap(struct dma_buf *dmabuf) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + void *vaddr; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + vaddr = buffer->vaddr; + goto out; + } + + vaddr = cma_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) + goto out; + + buffer->vaddr = vaddr; + buffer->vmap_cnt++; +out: + mutex_unlock(&buffer->lock); + + return vaddr; +} + +static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); +} + +static void cma_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct cma_heap *cma_heap = buffer->heap; + + if (buffer->vmap_cnt > 0) { + WARN(1, "%s: buffer still mapped in the kernel\n", __func__); + vunmap(buffer->vaddr); + } /* free page list */ kfree(buffer->pages); /* release memory */ - cma_release(cma_heap->cma, cma_pages, nr_pages); + cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); kfree(buffer); } -/* dmabuf heap CMA operations functions */ -static int cma_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) +static const struct dma_buf_ops cma_heap_buf_ops = { + .attach = cma_heap_attach, + .detach = cma_heap_detach, + .map_dma_buf = cma_heap_map_dma_buf, + .unmap_dma_buf = cma_heap_unmap_dma_buf, + .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access, + .end_cpu_access = cma_heap_dma_buf_end_cpu_access, + .mmap = cma_heap_mmap, + .vmap = cma_heap_vmap, + .vunmap = cma_heap_vunmap, + .release = cma_heap_dma_buf_release, +}; + +static struct dma_buf *cma_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) { struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); - struct heap_helper_buffer *helper_buffer; - struct page *cma_pages; + struct cma_heap_buffer *buffer; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); size_t size = PAGE_ALIGN(len); - unsigned long nr_pages = size >> PAGE_SHIFT; + pgoff_t pagecount = size >> PAGE_SHIFT; unsigned long align = get_order(size); + struct page *cma_pages; struct dma_buf *dmabuf; int ret = -ENOMEM; pgoff_t pg; - if (align > CONFIG_CMA_ALIGNMENT) - align = CONFIG_CMA_ALIGNMENT; + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); - if (!helper_buffer) - return -ENOMEM; + INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->len = size; 
- init_heap_helper_buffer(helper_buffer, cma_heap_free); - helper_buffer->heap = heap; - helper_buffer->size = len; + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; - cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false); + cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL); if (!cma_pages) - goto free_buf; + goto free_buffer; + /* Clear the cma pages */ if (PageHighMem(cma_pages)) { - unsigned long nr_clear_pages = nr_pages; + unsigned long nr_clear_pages = pagecount; struct page *page = cma_pages; while (nr_clear_pages > 0) { @@ -85,7 +313,6 @@ static int cma_heap_allocate(struct dma_heap *heap, */ if (fatal_signal_pending(current)) goto free_cma; - page++; nr_clear_pages--; } @@ -93,44 +320,41 @@ static int cma_heap_allocate(struct dma_heap *heap, memset(page_address(cma_pages), 0, size); } - helper_buffer->pagecount = nr_pages; - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, - sizeof(*helper_buffer->pages), - GFP_KERNEL); - if (!helper_buffer->pages) { + buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL); + if (!buffer->pages) { ret = -ENOMEM; goto free_cma; } - for (pg = 0; pg < helper_buffer->pagecount; pg++) - helper_buffer->pages[pg] = &cma_pages[pg]; + for (pg = 0; pg < pagecount; pg++) + buffer->pages[pg] = &cma_pages[pg]; + + buffer->cma_pages = cma_pages; + buffer->heap = cma_heap; + buffer->pagecount = pagecount; /* create the dmabuf */ - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + exp_info.exp_name = dma_heap_get_name(heap); + exp_info.ops = &cma_heap_buf_ops; + exp_info.size = buffer->len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { ret = PTR_ERR(dmabuf); goto free_pages; } - helper_buffer->dmabuf = dmabuf; - helper_buffer->priv_virt = cma_pages; - - ret = dma_buf_fd(dmabuf, fd_flags); - if (ret < 0) { - dma_buf_put(dmabuf); - /* just return, as put will call release and that will free */ - return ret; - } - - return ret; + return dmabuf; free_pages: - kfree(helper_buffer->pages); + kfree(buffer->pages); free_cma: - cma_release(cma_heap->cma, cma_pages, nr_pages); -free_buf: - kfree(helper_buffer); - return ret; + cma_release(cma_heap->cma, cma_pages, pagecount); +free_buffer: + kfree(buffer); + + return ERR_PTR(ret); } static const struct dma_heap_ops cma_heap_ops = { diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index 0bf688e3c023..15796bc4c033 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -3,7 +3,11 @@ * DMABUF System heap exporter * * Copyright (C) 2011 Google, Inc. - * Copyright (C) 2019 Linaro Ltd. + * Copyright (C) 2019, 2020 Linaro Ltd. + * + * Portions based off of Andrew Davis' SRAM heap: + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. 
Davis */ #include @@ -15,99 +19,546 @@ #include #include #include -#include -#include +#include + +#include "page_pool.h" +#include "deferred-free-helper.h" + +static struct dma_heap *sys_heap; +static struct dma_heap *sys_uncached_heap; + +struct system_heap_buffer { + struct dma_heap *heap; + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct sg_table sg_table; + int vmap_cnt; + void *vaddr; + struct deferred_freelist_item deferred_free; + + bool uncached; +}; + +struct dma_heap_attachment { + struct device *dev; + struct sg_table *table; + struct list_head list; + bool mapped; -#include "heap-helpers.h" + bool uncached; +}; -struct dma_heap *sys_heap; +#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \ + | __GFP_NORETRY) & ~__GFP_RECLAIM) \ + | __GFP_COMP) +#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP) +static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP}; +/* + * The selection of the orders used for allocation (1MB, 64K, 4K) is designed + * to match with the sizes often found in IOMMUs. Using order 4 pages instead + * of order 0 pages can significantly improve the performance of many IOMMUs + * by reducing TLB pressure and time spent updating page tables. + */ +static const unsigned int orders[] = {8, 4, 0}; +#define NUM_ORDERS ARRAY_SIZE(orders) +struct dmabuf_page_pool *pools[NUM_ORDERS]; -static void system_heap_free(struct heap_helper_buffer *buffer) +static struct sg_table *dup_sg_table(struct sg_table *table) { - pgoff_t pg; + struct sg_table *new_table; + int ret, i; + struct scatterlist *sg, *new_sg; - for (pg = 0; pg < buffer->pagecount; pg++) - __free_page(buffer->pages[pg]); - kfree(buffer->pages); - kfree(buffer); + new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); + if (!new_table) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); + if (ret) { + kfree(new_table); + return ERR_PTR(-ENOMEM); + } + + new_sg = new_table->sgl; + for_each_sgtable_sg(table, sg, i) { + sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); + new_sg = sg_next(new_sg); + } + + return new_table; } -static int system_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) +static int system_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) { - struct heap_helper_buffer *helper_buffer; - struct dma_buf *dmabuf; - int ret = -ENOMEM; - pgoff_t pg; + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + struct sg_table *table; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); - if (!helper_buffer) + table = dup_sg_table(&buffer->sg_table); + if (IS_ERR(table)) { + kfree(a); return -ENOMEM; + } + + a->table = table; + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + a->uncached = buffer->uncached; + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void system_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(a->table); + kfree(a->table); + kfree(a); +} - init_heap_helper_buffer(helper_buffer, system_heap_free); - 
helper_buffer->heap = heap; - helper_buffer->size = len; +static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + struct sg_table *table = a->table; + int attr = 0; + int ret; + + if (a->uncached) + attr = DMA_ATTR_SKIP_CPU_SYNC; + + ret = dma_map_sgtable(attachment->dev, table, direction, attr); + if (ret) + return ERR_PTR(ret); + + a->mapped = true; + return table; +} + +static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + int attr = 0; + + if (a->uncached) + attr = DMA_ATTR_SKIP_CPU_SYNC; + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, attr); +} - helper_buffer->pagecount = len / PAGE_SIZE; - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, - sizeof(*helper_buffer->pages), - GFP_KERNEL); - if (!helper_buffer->pages) { - ret = -ENOMEM; - goto err0; +static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + if (!buffer->uncached) { + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_cpu(a->dev, a->table, direction); + } } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + if (!buffer->uncached) { + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_device(a->dev, a->table, direction); + } + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct sg_table *table = &buffer->sg_table; + unsigned long addr = vma->vm_start; + struct sg_page_iter piter; + int ret; + + if (buffer->uncached) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + for_each_sgtable_page(table, &piter, vma->vm_pgoff) { + struct page *page = sg_page_iter_page(&piter); + + ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, + vma->vm_page_prot); + if (ret) + return ret; + addr += PAGE_SIZE; + if (addr >= vma->vm_end) + return 0; + } + return 0; +} + +static void *system_heap_do_vmap(struct system_heap_buffer *buffer) +{ + struct sg_table *table = &buffer->sg_table; + int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + struct sg_page_iter piter; + pgprot_t pgprot = PAGE_KERNEL; + void *vaddr; + + if (!pages) + return ERR_PTR(-ENOMEM); + + if (buffer->uncached) + pgprot = pgprot_writecombine(PAGE_KERNEL); + + for_each_sgtable_page(table, &piter, 0) { + WARN_ON(tmp - pages >= npages); + *tmp++ = sg_page_iter_page(&piter); + } + + vaddr = vmap(pages, npages, VM_MAP, pgprot); + vfree(pages); + + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static void *system_heap_vmap(struct 
dma_buf *dmabuf) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + void *vaddr; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + vaddr = buffer->vaddr; + goto out; + } + + vaddr = system_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) + goto out; + + buffer->vaddr = vaddr; + buffer->vmap_cnt++; +out: + mutex_unlock(&buffer->lock); - for (pg = 0; pg < helper_buffer->pagecount; pg++) { + return vaddr; +} + +static void system_heap_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); +} + +static int system_heap_zero_buffer(struct system_heap_buffer *buffer) +{ + struct sg_table *sgt = &buffer->sg_table; + struct sg_page_iter piter; + struct page *p; + void *vaddr; + int ret = 0; + + for_each_sgtable_page(sgt, &piter, 0) { + p = sg_page_iter_page(&piter); + vaddr = kmap_atomic(p); + memset(vaddr, 0, PAGE_SIZE); + kunmap_atomic(vaddr); + } + + return ret; +} + +static void system_heap_buf_free(struct deferred_freelist_item *item, + enum df_reason reason) +{ + struct system_heap_buffer *buffer; + struct sg_table *table; + struct scatterlist *sg; + int i, j; + + buffer = container_of(item, struct system_heap_buffer, deferred_free); + /* Zero the buffer pages before adding back to the pool */ + if (reason == DF_NORMAL) + if (system_heap_zero_buffer(buffer)) + reason = DF_UNDER_PRESSURE; // On failure, just free + + table = &buffer->sg_table; + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + + if (reason == DF_UNDER_PRESSURE) { + __free_pages(page, compound_order(page)); + } else { + for (j = 0; j < NUM_ORDERS; j++) { + if (compound_order(page) == orders[j]) + break; + } + dmabuf_page_pool_free(pools[j], page); + } + } + sg_free_table(table); + kfree(buffer); +} + +static void system_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE; + + deferred_free(&buffer->deferred_free, system_heap_buf_free, npages); +} + +static const struct dma_buf_ops system_heap_buf_ops = { + .attach = system_heap_attach, + .detach = system_heap_detach, + .map_dma_buf = system_heap_map_dma_buf, + .unmap_dma_buf = system_heap_unmap_dma_buf, + .begin_cpu_access = system_heap_dma_buf_begin_cpu_access, + .end_cpu_access = system_heap_dma_buf_end_cpu_access, + .mmap = system_heap_mmap, + .vmap = system_heap_vmap, + .vunmap = system_heap_vunmap, + .release = system_heap_dma_buf_release, +}; + +static struct page *alloc_largest_available(unsigned long size, + unsigned int max_order) +{ + struct page *page; + int i; + + for (i = 0; i < NUM_ORDERS; i++) { + if (size < (PAGE_SIZE << orders[i])) + continue; + if (max_order < orders[i]) + continue; + page = dmabuf_page_pool_alloc(pools[i]); + if (!page) + continue; + return page; + } + return NULL; +} + +static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags, + bool uncached) +{ + struct system_heap_buffer *buffer; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + unsigned long size_remaining = len; + unsigned int max_order = orders[0]; + struct dma_buf *dmabuf; + struct sg_table *table; + struct scatterlist *sg; + struct list_head pages; + struct page *page, *tmp_page; + int i, ret = -ENOMEM; + + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if 
(!buffer) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->heap = heap; + buffer->len = len; + buffer->uncached = uncached; + + INIT_LIST_HEAD(&pages); + i = 0; + while (size_remaining > 0) { /* * Avoid trying to allocate memory if the process - * has been killed by by SIGKILL + * has been killed by SIGKILL */ if (fatal_signal_pending(current)) - goto err1; + goto free_buffer; + + page = alloc_largest_available(size_remaining, max_order); + if (!page) + goto free_buffer; + + list_add_tail(&page->lru, &pages); + size_remaining -= page_size(page); + max_order = compound_order(page); + i++; + } - helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!helper_buffer->pages[pg]) - goto err1; + table = &buffer->sg_table; + if (sg_alloc_table(table, i, GFP_KERNEL)) + goto free_buffer; + + sg = table->sgl; + list_for_each_entry_safe(page, tmp_page, &pages, lru) { + sg_set_page(sg, page, page_size(page), 0); + sg = sg_next(sg); + list_del(&page->lru); } /* create the dmabuf */ - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + exp_info.exp_name = dma_heap_get_name(heap); + exp_info.ops = &system_heap_buf_ops; + exp_info.size = buffer->len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { ret = PTR_ERR(dmabuf); - goto err1; + goto free_pages; + } + + /* + * For uncached buffers, we need to initially flush cpu cache, since + * the __GFP_ZERO on the allocation means the zeroing was done by the + * cpu and thus it is likely cached. Map (and implicitly flush) and + * unmap it now so we don't get corruption later on. + */ + if (buffer->uncached) { + dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0); + dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0); } - helper_buffer->dmabuf = dmabuf; + return dmabuf; - ret = dma_buf_fd(dmabuf, fd_flags); - if (ret < 0) { - dma_buf_put(dmabuf); - /* just return, as put will call release and that will free */ - return ret; +free_pages: + for_each_sgtable_sg(table, sg, i) { + struct page *p = sg_page(sg); + + __free_pages(p, compound_order(p)); } + sg_free_table(table); +free_buffer: + list_for_each_entry_safe(page, tmp_page, &pages, lru) + __free_pages(page, compound_order(page)); + kfree(buffer); - return ret; + return ERR_PTR(ret); +} -err1: - while (pg > 0) - __free_page(helper_buffer->pages[--pg]); - kfree(helper_buffer->pages); -err0: - kfree(helper_buffer); +static struct dma_buf *system_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false); +} - return ret; +static long system_get_pool_size(struct dma_heap *heap) +{ + int i; + long num_pages = 0; + struct dmabuf_page_pool **pool; + + pool = pools; + for (i = 0; i < NUM_ORDERS; i++, pool++) { + num_pages += ((*pool)->count[POOL_LOWPAGE] + + (*pool)->count[POOL_HIGHPAGE]) << (*pool)->order; + } + + return num_pages << PAGE_SHIFT; } static const struct dma_heap_ops system_heap_ops = { .allocate = system_heap_allocate, + .get_pool_size = system_get_pool_size, +}; + +static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true); +} + +/* Dummy function to be used until we can call coerce_mask_and_coherent */ +static struct dma_buf 
*system_uncached_heap_not_initialized(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + return ERR_PTR(-EBUSY); +} + +static struct dma_heap_ops system_uncached_heap_ops = { + /* After system_heap_create is complete, we will swap this */ + .allocate = system_uncached_heap_not_initialized, }; static int system_heap_create(void) { struct dma_heap_export_info exp_info; - int ret = 0; + int i; + + for (i = 0; i < NUM_ORDERS; i++) { + pools[i] = dmabuf_page_pool_create(order_flags[i], orders[i]); + + if (!pools[i]) { + int j; + + pr_err("%s: page pool creation failed!\n", __func__); + for (j = 0; j < i; j++) + dmabuf_page_pool_destroy(pools[j]); + return -ENOMEM; + } + } exp_info.name = "system"; exp_info.ops = &system_heap_ops; @@ -115,9 +566,21 @@ static int system_heap_create(void) sys_heap = dma_heap_add(&exp_info); if (IS_ERR(sys_heap)) - ret = PTR_ERR(sys_heap); + return PTR_ERR(sys_heap); - return ret; + exp_info.name = "system-uncached"; + exp_info.ops = &system_uncached_heap_ops; + exp_info.priv = NULL; + + sys_uncached_heap = dma_heap_add(&exp_info); + if (IS_ERR(sys_uncached_heap)) + return PTR_ERR(sys_uncached_heap); + + dma_coerce_mask_and_coherent(dma_heap_get_dev(sys_uncached_heap), DMA_BIT_MASK(64)); + mb(); /* make sure we only set allocate after dma_mask is set */ + system_uncached_heap_ops.allocate = system_uncached_heap_allocate; + + return 0; } module_init(system_heap_create); MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 348b3a9170fa..3daa6c76b8dd 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -7,6 +7,8 @@ #include #include +#include +#include #include #include #include @@ -410,3 +412,13 @@ const struct file_operations sw_sync_debugfs_fops = { .unlocked_ioctl = sw_sync_ioctl, .compat_ioctl = compat_ptr_ioctl, }; + +static struct miscdevice sw_sync_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sw_sync", + .fops = &sw_sync_debugfs_fops, +}; + +module_misc_device(sw_sync_dev); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 101394f16930..a2f906741ce0 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -8,6 +8,7 @@ #include #include "sync_debug.h" +#ifdef CONFIG_DEBUG_FS static struct dentry *dbgfs; static LIST_HEAD(sync_timeline_list_head); @@ -188,3 +189,4 @@ static __init int sync_debugfs_init(void) return 0; } late_initcall(sync_debugfs_init); +#endif diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h index 6176e52ba2d7..ee84997da6b4 100644 --- a/drivers/dma-buf/sync_debug.h +++ b/drivers/dma-buf/sync_debug.h @@ -62,11 +62,18 @@ struct sync_pt { struct rb_node node; }; +#ifdef CONFIG_DEBUG_FS extern const struct file_operations sw_sync_debugfs_fops; void sync_timeline_debug_add(struct sync_timeline *obj); void sync_timeline_debug_remove(struct sync_timeline *obj); void sync_file_debug_add(struct sync_file *fence); void sync_file_debug_remove(struct sync_file *fence); +#else +static inline void sync_timeline_debug_add(struct sync_timeline *obj) {} +static inline void sync_timeline_debug_remove(struct sync_timeline *obj) {} +static inline void sync_file_debug_add(struct sync_file *fence) {} +static inline void sync_file_debug_remove(struct sync_file *fence) {} +#endif #endif /* _LINUX_SYNC_H */ diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index c08968c5ddf8..d9dbfda3c5f2 100644 --- a/drivers/firmware/Kconfig +++ 
b/drivers/firmware/Kconfig @@ -9,7 +9,7 @@ menu "Firmware Drivers" config ARM_SCMI_PROTOCOL tristate "ARM System Control and Management Interface (SCMI) Message Protocol" depends on ARM || ARM64 || COMPILE_TEST - depends on MAILBOX || HAVE_ARM_SMCCC_DISCOVERY + depends on MAILBOX help ARM System Control and Management Interface (SCMI) protocol is a set of operating system-independent software interfaces that are @@ -251,6 +251,13 @@ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT Say Y here to enable "download mode" by default. +config ROCKCHIP_SIP + tristate "Rockchip SIP interface" + depends on HAVE_ARM_SMCCC && ARCH_ROCKCHIP + help + Say Y here if you want to enable SIP callbacks for Rockchip platforms. + This option enables support for communicating with the ATF. + config TI_SCI_PROTOCOL tristate "TI System Control Interface (TISCI) Message Protocol" depends on TI_MESSAGE_MANAGER diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 5e013b6a3692..5198c938857b 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o +obj-$(CONFIG_ROCKCHIP_SIP) += ../$(VENDOR_DRIVER_DIR)/firmware/ obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o obj-$(CONFIG_QCOM_SCM) += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index d1300fc003ed..9a4110ad1c20 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -495,6 +495,14 @@ config GPIO_REG A 32-bit single register GPIO fixed in/out implementation. This can be used to represent any register as a set of GPIO signals. +config GPIO_ROCKCHIP + tristate "Rockchip GPIO support" + depends on ARCH_ROCKCHIP || COMPILE_TEST + select GPIOLIB_IRQCHIP + default ARCH_ROCKCHIP + help + Say yes here to support GPIO on Rockchip SoCs. 
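The new ROCKCHIP_SIP option above only wires the vendor firmware directory into the build; the driver behind it ultimately talks to the ATF through standard SMCCC SiP calls. A hypothetical sketch of such a call follows, using the generic SMCCC helpers with an invented function ID (0x0001) and wrapper name, not the real Rockchip SIP ABI:

#include <linux/arm-smccc.h>

/* Illustrative SiP-owned fast call ID; the real Rockchip IDs differ. */
#define ROCKCHIP_SIP_EXAMPLE_FN					\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32,	\
			   ARM_SMCCC_OWNER_SIP, 0x0001)

static unsigned long rockchip_sip_example(unsigned long arg)
{
	struct arm_smccc_res res;

	/* Trap into the ATF; unused arguments are passed as zero */
	arm_smccc_smc(ROCKCHIP_SIP_EXAMPLE_FN, arg, 0, 0, 0, 0, 0, 0, &res);

	return res.a0;	/* status code returned by the ATF */
}

res.a0 carries the firmware's status code; real SiP wrappers typically translate it into a Linux errno before returning to callers.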
+ config GPIO_SAMA5D2_PIOBU tristate "SAMA5D2 PIOBU GPIO support" depends on MFD_SYSCON diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 09dada80ac34..2d00d22f1aa1 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -125,6 +125,7 @@ obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o obj-$(CONFIG_GPIO_RDA) += gpio-rda.o obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o obj-$(CONFIG_GPIO_REG) += gpio-reg.o +obj-$(CONFIG_GPIO_ROCKCHIP) += ../$(VENDOR_DRIVER_DIR)/gpio/ obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 2e63274a4c2c..ab666917b1ab 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -1046,3 +1046,14 @@ void of_gpiochip_remove(struct gpio_chip *chip) { of_node_put(chip->of_node); } + +void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) +{ + /* If the gpiochip has an assigned OF node this takes precedence */ + if (gc->of_node) + gdev->dev.of_node = gc->of_node; + else + gc->of_node = gdev->dev.of_node; + if (gdev->dev.of_node) + gdev->dev.fwnode = of_fwnode_handle(gdev->dev.of_node); +} diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h index ed26664f1537..8af2bc899aab 100644 --- a/drivers/gpio/gpiolib-of.h +++ b/drivers/gpio/gpiolib-of.h @@ -15,6 +15,7 @@ int of_gpiochip_add(struct gpio_chip *gc); void of_gpiochip_remove(struct gpio_chip *gc); int of_gpio_get_count(struct device *dev, const char *con_id); bool of_gpio_need_valid_mask(const struct gpio_chip *gc); +void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev); #else static inline struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, @@ -33,6 +34,10 @@ static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc) { return false; } +static inline void of_gpio_dev_init(struct gpio_chip *gc, + struct gpio_device *gdev) +{ +} #endif /* CONFIG_OF_GPIO */ extern struct notifier_block gpio_of_notifier; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 4e9b3a95fa7c..dba6d72bdcb0 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -9,10 +9,10 @@ menuconfig DRM tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA select DRM_PANEL_ORIENTATION_QUIRKS - select HDMI + select HDMI if !ROCKCHIP_MINI_KERNEL select FB_CMDLINE select I2C - select I2C_ALGOBIT + select I2C_ALGOBIT if !ROCKCHIP_MINI_KERNEL select DMA_SHARED_BUFFER select SYNC_FILE # gallium uses SYS_kcmp for os_same_file_description() to de-duplicate @@ -32,10 +32,30 @@ config DRM_MIPI_DBI depends on DRM select DRM_KMS_HELPER +config DRM_EDID + bool "EDID function for DRM" + depends on DRM + default y if !ROCKCHIP_MINI_KERNEL + help + DRM EDID read and parse function. + +config DRM_IGNORE_IOTCL_PERMIT + bool "Ignore drm ioctl permission" + depends on DRM && ANDROID && NO_GKI + config DRM_MIPI_DSI bool depends on DRM +config DRM_DP + bool "DRM DisplayPort support" + depends on DRM + depends on DRM_KMS_HELPER + default y if DRM_ANALOGIX_DP + default y if !ROCKCHIP_MINI_KERNEL + help + Choose this option to support DP interface. 
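Making EDID support optional (CONFIG_DRM_EDID above) means connector code that must also build for the mini-kernel configuration has to tolerate the EDID helpers being compiled out. A speculative sketch of one way a driver could guard that path; rk_connector_get_modes() and the zero-mode fallback are illustrative and not taken from this patch:

#include <linux/slab.h>
#include <drm/drm_edid.h>

/* Hypothetical helper: only touch the EDID helpers when they are built in. */
static int rk_connector_get_modes(struct drm_connector *connector,
				  struct i2c_adapter *ddc)
{
	if (IS_ENABLED(CONFIG_DRM_EDID)) {
		struct edid *edid = drm_get_edid(connector, ddc);

		if (edid) {
			int count;

			drm_connector_update_edid_property(connector, edid);
			count = drm_add_edid_modes(connector, edid);
			kfree(edid);
			return count;
		}
	}

	/* EDID support compiled out or read failed: report no probed modes */
	return 0;
}

The IS_ENABLED() branch is discarded at compile time when DRM_EDID is off, so the calls into drm_edid.c fall away in a mini-kernel build.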
+ config DRM_DP_AUX_CHARDEV bool "DRM DP AUX Interface" depends on DRM diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 81569009f884..a53dc7b0bc0e 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -7,7 +7,7 @@ drm-y := drm_auth.o drm_cache.o \ drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \ drm_memory.o drm_drv.o \ drm_sysfs.o drm_hashtab.o drm_mm.o \ - drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \ + drm_crtc.o drm_fourcc.o drm_modes.o \ drm_encoder_slave.o \ drm_trace_points.o drm_prime.o \ drm_rect.o drm_vma_manager.o drm_flip_work.o \ @@ -20,6 +20,7 @@ drm-y := drm_auth.o drm_cache.o \ drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \ drm_managed.o drm_vblank_work.o +drm-$(CONFIG_DRM_EDID) += drm_edid.o drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_DRM_VM) += drm_vm.o @@ -39,10 +40,10 @@ obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o drm_ttm_helper-y := drm_gem_ttm_helper.o obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o -drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \ +drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o \ drm_dsc.o drm_probe_helper.o \ - drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ - drm_kms_helper_common.o drm_dp_dual_mode_helper.o \ + drm_plane_helper.o drm_atomic_helper.o \ + drm_kms_helper_common.o \ drm_simple_kms_helper.o drm_modeset_helper.o \ drm_scdc_helper.o drm_gem_framebuffer_helper.o \ drm_atomic_state_helper.o drm_damage_helper.o \ @@ -51,6 +52,8 @@ drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \ drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o +drm_kms_helper-$(CONFIG_DRM_DP) += drm_dp_helper.o drm_dp_mst_topology.o \ + drm_dp_dual_mode_helper.o drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 6c4cba09d23b..0c81a7f6dc8b 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -40,8 +40,6 @@ config DRM_AMD_DC_HDCP config DRM_AMD_DC_SI bool "AMD DC support for Southern Islands ASICs" - depends on DRM_AMDGPU_SI - depends on DRM_AMD_DC default n help Choose this option to enable new AMD DC support for SI asics diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 4e82647a621e..3ac1c5725faa 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -126,6 +126,22 @@ config DRM_PARADE_PS8640 The PS8640 is a high-performance and low-power MIPI DSI to eDP converter +config DRM_RK630_TVE + tristate "ROCKCHIP RK630 TVE bridge" + depends on OF + depends on MFD_RK630 + select DRM_KMS_HELPER + help + ROCKCHIP TVE bridge chip RK630 driver. + +config DRM_RK1000_TVE + tristate "Rockchip RK1000 TVE bridge" + depends on OF + select DRM_KMS_HELPER + select MFD_RK1000 + help + Rockchip TVE bridge chip driver. 
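[Editor's illustrative sketch, not part of this patch: the DRM_RK630_TVE and DRM_RK1000_TVE entries above add external-encoder bridge drivers; a driver of this kind typically registers a struct drm_bridge from probe so the SoC encoder driver can attach to it later. All names below are placeholders.]

#include <drm/drm_bridge.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct example_tve {
	struct drm_bridge bridge;
};

/* Placeholder ops; a real driver fills in .attach, enable/disable, etc. */
static const struct drm_bridge_funcs example_tve_bridge_funcs = {
};

static int example_tve_probe(struct platform_device *pdev)
{
	struct example_tve *tve;

	tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
	if (!tve)
		return -ENOMEM;

	tve->bridge.funcs = &example_tve_bridge_funcs;
	tve->bridge.of_node = pdev->dev.of_node;
	drm_bridge_add(&tve->bridge);	/* make the bridge visible to encoder drivers */

	platform_set_drvdata(pdev, tve);
	return 0;
}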
+ config DRM_SIL_SII8620 tristate "Silicon Image SII8620 HDMI/MHL bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 2b3aff104e46..519e916ed045 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -8,6 +8,8 @@ obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o +obj-$(CONFIG_DRM_RK630_TVE) += rk630-tve.o +obj-$(CONFIG_DRM_RK1000_TVE) += rk1000.o obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o obj-$(CONFIG_DRM_SII902X) += sii902x.o obj-$(CONFIG_DRM_SII9234) += sii9234.o diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index e8baa07450b7..3489b9702b16 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -8,11 +8,13 @@ #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -35,11 +37,30 @@ static const bool verify_fast_training; +static const unsigned int analogix_dp_cable[] = { + EXTCON_DISP_DP, + EXTCON_NONE, +}; + struct bridge_init { struct i2c_client *client; struct device_node *node; }; +static bool analogix_dp_bandwidth_ok(struct analogix_dp_device *dp, + const struct drm_display_mode *mode, + unsigned int rate, unsigned int lanes) +{ + u32 max_bw, req_bw, bpp = 24; + + req_bw = mode->clock * bpp / 8; + max_bw = lanes * rate; + if (req_bw > max_bw) + return false; + + return true; +} + static int analogix_dp_init_dp(struct analogix_dp_device *dp) { int ret; @@ -64,6 +85,46 @@ static int analogix_dp_init_dp(struct analogix_dp_device *dp) return 0; } +static int analogix_dp_panel_prepare(struct analogix_dp_device *dp) +{ + int ret; + + mutex_lock(&dp->panel_lock); + + if (dp->panel_is_prepared) + goto out; + + ret = drm_panel_prepare(dp->plat_data->panel); + if (ret) + goto out; + + dp->panel_is_prepared = true; + +out: + mutex_unlock(&dp->panel_lock); + return 0; +} + +static int analogix_dp_panel_unprepare(struct analogix_dp_device *dp) +{ + int ret; + + mutex_lock(&dp->panel_lock); + + if (!dp->panel_is_prepared) + goto out; + + ret = drm_panel_unprepare(dp->plat_data->panel); + if (ret) + goto out; + + dp->panel_is_prepared = false; + +out: + mutex_unlock(&dp->panel_lock); + return 0; +} + static int analogix_dp_detect_hpd(struct analogix_dp_device *dp) { int timeout_loop = 0; @@ -108,6 +169,9 @@ static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp) unsigned char psr_version; int ret; + if (!device_property_read_bool(dp->dev, "support-psr")) + return 0; + ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version); if (ret != 1) { dev_err(dp->dev, "failed to get PSR version, disable it\n"); @@ -216,8 +280,24 @@ static int analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp) if (ret < 0) return ret; + if (!data) { + /* + * A setting of 1 indicates that this is an eDP device that + * uses only Enhanced Framing, independently of the setting by + * the source of ENHANCED_FRAME_EN + */ + ret = drm_dp_dpcd_readb(&dp->aux, DP_EDP_CONFIGURATION_CAP, + &data); + if (ret < 0) + return ret; + + data = !!(data & DP_FRAMING_CHANGE_CAP); + } + analogix_dp_enable_enhanced_mode(dp, data); + dp->link_train.enhanced_framing = data; + return 0; } @@ -233,32 +313,10 @@ static int analogix_dp_training_pattern_dis(struct 
analogix_dp_device *dp) return ret < 0 ? ret : 0; } -static void -analogix_dp_set_lane_lane_pre_emphasis(struct analogix_dp_device *dp, - int pre_emphasis, int lane) -{ - switch (lane) { - case 0: - analogix_dp_set_lane0_pre_emphasis(dp, pre_emphasis); - break; - case 1: - analogix_dp_set_lane1_pre_emphasis(dp, pre_emphasis); - break; - - case 2: - analogix_dp_set_lane2_pre_emphasis(dp, pre_emphasis); - break; - - case 3: - analogix_dp_set_lane3_pre_emphasis(dp, pre_emphasis); - break; - } -} - static int analogix_dp_link_start(struct analogix_dp_device *dp) { u8 buf[4]; - int lane, lane_count, pll_tries, retval; + int lane, lane_count, retval; lane_count = dp->link_train.lane_count; @@ -278,6 +336,14 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp) retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2); if (retval < 0) return retval; + + /* Spread AMP if required, enable 8b/10b coding */ + buf[0] = analogix_dp_ssc_supported(dp) ? DP_SPREAD_AMP_0_5 : 0; + buf[1] = DP_SET_ANSI_8B10B; + retval = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, 2); + if (retval < 0) + return retval; + /* set enhanced mode if available */ retval = analogix_dp_set_enhanced_mode(dp); if (retval < 0) { @@ -285,22 +351,12 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp) return retval; } - /* Set TX pre-emphasis to minimum */ + /* Set TX voltage-swing and pre-emphasis to minimum */ for (lane = 0; lane < lane_count; lane++) - analogix_dp_set_lane_lane_pre_emphasis(dp, - PRE_EMPHASIS_LEVEL_0, lane); - - /* Wait for PLL lock */ - pll_tries = 0; - while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { - if (pll_tries == DP_TIMEOUT_LOOP_COUNT) { - dev_err(dp->dev, "Wait for PLL lock timed out\n"); - return -ETIMEDOUT; - } - - pll_tries++; - usleep_range(90, 120); - } + dp->link_train.training_lane[lane] = + DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | + DP_TRAIN_PRE_EMPH_LEVEL_0; + analogix_dp_set_lane_link_training(dp); /* Set training pattern 1 */ analogix_dp_set_training_pattern(dp, TRAINING_PTN1); @@ -383,54 +439,6 @@ static unsigned char analogix_dp_get_adjust_request_pre_emphasis( return ((link_value >> shift) & 0xc) >> 2; } -static void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp, - u8 training_lane_set, int lane) -{ - switch (lane) { - case 0: - analogix_dp_set_lane0_link_training(dp, training_lane_set); - break; - case 1: - analogix_dp_set_lane1_link_training(dp, training_lane_set); - break; - - case 2: - analogix_dp_set_lane2_link_training(dp, training_lane_set); - break; - - case 3: - analogix_dp_set_lane3_link_training(dp, training_lane_set); - break; - } -} - -static unsigned int -analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, - int lane) -{ - u32 reg; - - switch (lane) { - case 0: - reg = analogix_dp_get_lane0_link_training(dp); - break; - case 1: - reg = analogix_dp_get_lane1_link_training(dp); - break; - case 2: - reg = analogix_dp_get_lane2_link_training(dp); - break; - case 3: - reg = analogix_dp_get_lane3_link_training(dp); - break; - default: - WARN_ON(1); - return 0; - } - - return reg; -} - static void analogix_dp_reduce_link_rate(struct analogix_dp_device *dp) { analogix_dp_training_pattern_dis(dp); @@ -463,13 +471,27 @@ static void analogix_dp_get_adjust_training_lane(struct analogix_dp_device *dp, } } +static bool analogix_dp_tps3_supported(struct analogix_dp_device *dp) +{ + bool source_tps3_supported, sink_tps3_supported; + u8 dpcd = 0; + + source_tps3_supported = + dp->video_info.max_link_rate == 
DP_LINK_BW_5_4; + drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &dpcd); + sink_tps3_supported = dpcd & DP_TPS3_SUPPORTED; + + return source_tps3_supported && sink_tps3_supported; +} + static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp) { int lane, lane_count, retval; u8 voltage_swing, pre_emphasis, training_lane; u8 link_status[2], adjust_request[2]; + u8 training_pattern = TRAINING_PTN2; - usleep_range(100, 101); + drm_dp_link_train_clock_recovery_delay(dp->dpcd); lane_count = dp->link_train.lane_count; @@ -483,12 +505,16 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp) return retval; if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) { - /* set training pattern 2 for EQ */ - analogix_dp_set_training_pattern(dp, TRAINING_PTN2); + if (analogix_dp_tps3_supported(dp)) + training_pattern = TRAINING_PTN3; + + /* set training pattern for EQ */ + analogix_dp_set_training_pattern(dp, training_pattern); retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, DP_LINK_SCRAMBLING_DISABLE | - DP_TRAINING_PATTERN_2); + (training_pattern == TRAINING_PTN3 ? + DP_TRAINING_PATTERN_3 : DP_TRAINING_PATTERN_2)); if (retval < 0) return retval; @@ -522,10 +548,7 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp) } analogix_dp_get_adjust_training_lane(dp, adjust_request); - - for (lane = 0; lane < lane_count; lane++) - analogix_dp_set_lane_link_training(dp, - dp->link_train.training_lane[lane], lane); + analogix_dp_set_lane_link_training(dp); retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link_train.training_lane, lane_count); @@ -537,11 +560,11 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp) static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp) { - int lane, lane_count, retval; + int lane_count, retval; u32 reg; u8 link_align, link_status[2], adjust_request[2]; - usleep_range(400, 401); + drm_dp_link_train_channel_eq_delay(dp->dpcd); lane_count = dp->link_train.lane_count; @@ -597,9 +620,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp) return -EIO; } - for (lane = 0; lane < lane_count; lane++) - analogix_dp_set_lane_link_training(dp, - dp->link_train.training_lane[lane], lane); + analogix_dp_set_lane_link_training(dp); retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link_train.training_lane, lane_count); @@ -609,10 +630,11 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp) return 0; } -static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp, - u8 *bandwidth) +static int analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp, + u8 *bandwidth) { u8 data; + int ret; /* * For DP rev.1.1, Maximum link rate of Main Link lanes @@ -620,28 +642,41 @@ static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp, * For DP rev.1.2, Maximum link rate of Main Link lanes * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4Gbps */ - drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data); + ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data); + if (ret < 0) + return ret; + *bandwidth = data; + + return 0; } -static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp, - u8 *lane_count) +static int analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp, + u8 *lane_count) { u8 data; + int ret; /* * For DP rev.1.1, Maximum number of Main Link lanes * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes */ - 
drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data); + ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data); + if (ret < 0) + return ret; + *lane_count = DPCD_MAX_LANE_COUNT(data); + + return 0; } static int analogix_dp_full_link_train(struct analogix_dp_device *dp, u32 max_lanes, u32 max_rate) { + struct video_info *video = &dp->video_info; int retval = 0; bool training_finished = false; + u8 dpcd; /* * MACRO_RST must be applied after the PLL_LOCK to avoid @@ -667,6 +702,16 @@ static int analogix_dp_full_link_train(struct analogix_dp_device *dp, dp->link_train.lane_count = (u8)LANE_COUNT1; } + if (!analogix_dp_bandwidth_ok(dp, &video->mode, + drm_dp_bw_code_to_link_rate(dp->link_train.link_rate), + dp->link_train.lane_count)) { + dev_err(dp->dev, "bandwidth overflow\n"); + return -EINVAL; + } + + drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD, &dpcd); + dp->link_train.ssc = !!(dpcd & DP_MAX_DOWNSPREAD_0_5); + /* Setup TX lane count & rate */ if (dp->link_train.lane_count > max_lanes) dp->link_train.lane_count = max_lanes; @@ -711,27 +756,15 @@ static int analogix_dp_full_link_train(struct analogix_dp_device *dp, static int analogix_dp_fast_link_train(struct analogix_dp_device *dp) { - int i, ret; + int ret; u8 link_align, link_status[2]; - enum pll_status status; analogix_dp_reset_macro(dp); analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate); analogix_dp_set_lane_count(dp, dp->link_train.lane_count); - - for (i = 0; i < dp->link_train.lane_count; i++) { - analogix_dp_set_lane_link_training(dp, - dp->link_train.training_lane[i], i); - } - - ret = readx_poll_timeout(analogix_dp_get_pll_lock_status, dp, status, - status != PLL_UNLOCKED, 120, - 120 * DP_TIMEOUT_LOOP_COUNT); - if (ret) { - DRM_DEV_ERROR(dp->dev, "Wait for pll lock failed %d\n", ret); - return ret; - } + analogix_dp_set_lane_link_training(dp); + analogix_dp_enable_enhanced_mode(dp, dp->link_train.enhanced_framing); /* source Set training pattern 1 */ analogix_dp_set_training_pattern(dp, TRAINING_PTN1); @@ -742,7 +775,6 @@ static int analogix_dp_fast_link_train(struct analogix_dp_device *dp) /* From DP spec, pattern must be on-screen for a minimum 500us */ usleep_range(500, 600); - /* TODO: enhanced_mode?*/ analogix_dp_set_training_pattern(dp, DP_NONE); /* @@ -884,38 +916,21 @@ static int analogix_dp_enable_scramble(struct analogix_dp_device *dp, return ret < 0 ? 
ret : 0; } -static irqreturn_t analogix_dp_hardirq(int irq, void *arg) +static irqreturn_t analogix_dp_hpd_irq_handler(int irq, void *arg) { struct analogix_dp_device *dp = arg; - irqreturn_t ret = IRQ_NONE; - enum dp_irq_type irq_type; - irq_type = analogix_dp_get_irq_type(dp); - if (irq_type != DP_IRQ_TYPE_UNKNOWN) { - analogix_dp_mute_hpd_interrupt(dp); - ret = IRQ_WAKE_THREAD; - } + if (dp->drm_dev) + drm_helper_hpd_irq_event(dp->drm_dev); - return ret; + return IRQ_HANDLED; } static irqreturn_t analogix_dp_irq_thread(int irq, void *arg) { struct analogix_dp_device *dp = arg; - enum dp_irq_type irq_type; - irq_type = analogix_dp_get_irq_type(dp); - if (irq_type & DP_IRQ_TYPE_HP_CABLE_IN || - irq_type & DP_IRQ_TYPE_HP_CABLE_OUT) { - dev_dbg(dp->dev, "Detected cable status changed!\n"); - if (dp->drm_dev) - drm_helper_hpd_irq_event(dp->drm_dev); - } - - if (irq_type != DP_IRQ_TYPE_UNKNOWN) { - analogix_dp_clear_hotplug_interrupts(dp); - analogix_dp_unmute_hpd_interrupt(dp); - } + analogix_dp_irq_handler(dp); return IRQ_HANDLED; } @@ -936,16 +951,73 @@ static int analogix_dp_fast_link_train_detection(struct analogix_dp_device *dp) return 0; } +static int analogix_dp_link_power_up(struct analogix_dp_device *dp) +{ + u8 value; + int ret; + + if (dp->dpcd[DP_DPCD_REV] < 0x11) + return 0; + + ret = drm_dp_dpcd_readb(&dp->aux, DP_SET_POWER, &value); + if (ret < 0) + return ret; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, value); + if (ret < 0) + return ret; + + usleep_range(1000, 2000); + + return 0; +} + +static int analogix_dp_link_power_down(struct analogix_dp_device *dp) +{ + u8 value; + int ret; + + if (dp->dpcd[DP_DPCD_REV] < 0x11) + return 0; + + ret = drm_dp_dpcd_readb(&dp->aux, DP_SET_POWER, &value); + if (ret < 0) + return ret; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, value); + if (ret < 0) + return ret; + + return 0; +} + static int analogix_dp_commit(struct analogix_dp_device *dp) { + struct video_info *video = &dp->video_info; int ret; - /* Keep the panel disabled while we configure video */ - if (dp->plat_data->panel) { - if (drm_panel_disable(dp->plat_data->panel)) - DRM_ERROR("failed to disable the panel\n"); + ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd); + if (ret < 0) { + dev_err(dp->dev, "failed to read dpcd caps: %d\n", ret); + return ret; } + ret = analogix_dp_link_power_up(dp); + if (ret) { + dev_err(dp->dev, "failed to power up link: %d\n", ret); + return ret; + } + + if (device_property_read_bool(dp->dev, "panel-self-test")) + return drm_dp_dpcd_writeb(&dp->aux, DP_EDP_CONFIGURATION_SET, + DP_PANEL_SELF_TEST_ENABLE); + ret = analogix_dp_train_link(dp); if (ret) { dev_err(dp->dev, "unable to do link train, ret=%d\n", ret); @@ -959,21 +1031,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp) } analogix_dp_init_video(dp); + analogix_dp_set_video_format(dp); + + if (video->video_bist_enable) + analogix_dp_video_bist_enable(dp); + ret = analogix_dp_config_video(dp); if (ret) { dev_err(dp->dev, "unable to config video\n"); return ret; } - /* Safe to enable the panel now */ - if (dp->plat_data->panel) { - ret = drm_panel_enable(dp->plat_data->panel); - if (ret) { - DRM_ERROR("failed to enable the panel\n"); - return ret; - } - } - /* Check whether panel supports fast training */ ret = analogix_dp_fast_link_train_detection(dp); if (ret) @@ -1010,9 +1078,20 @@ static int analogix_dp_enable_psr(struct analogix_dp_device 
*dp) psr_vsc.db[1] = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID; ret = analogix_dp_send_psr_spd(dp, &psr_vsc, true); - if (!ret) + if (!ret) { analogix_dp_set_analog_power_down(dp, POWER_ALL, true); + if (dp->phy) { + union phy_configure_opts phy_cfg = {0}; + + phy_cfg.dp.lanes = 0; + phy_cfg.dp.set_lanes = true; + ret = phy_configure(dp->phy, &phy_cfg); + if (ret) + return ret; + } + } + return ret; } @@ -1058,70 +1137,24 @@ static int analogix_dp_disable_psr(struct analogix_dp_device *dp) return analogix_dp_send_psr_spd(dp, &psr_vsc, true); } -/* - * This function is a bit of a catch-all for panel preparation, hopefully - * simplifying the logic of functions that need to prepare/unprepare the panel - * below. - * - * If @prepare is true, this function will prepare the panel. Conversely, if it - * is false, the panel will be unprepared. - * - * If @is_modeset_prepare is true, the function will disregard the current state - * of the panel and either prepare/unprepare the panel based on @prepare. Once - * it finishes, it will update dp->panel_is_modeset to reflect the current state - * of the panel. - */ -static int analogix_dp_prepare_panel(struct analogix_dp_device *dp, - bool prepare, bool is_modeset_prepare) -{ - int ret = 0; - - if (!dp->plat_data->panel) - return 0; - - mutex_lock(&dp->panel_lock); - - /* - * Exit early if this is a temporary prepare/unprepare and we're already - * modeset (since we neither want to prepare twice or unprepare early). - */ - if (dp->panel_is_modeset && !is_modeset_prepare) - goto out; - - if (prepare) - ret = drm_panel_prepare(dp->plat_data->panel); - else - ret = drm_panel_unprepare(dp->plat_data->panel); - - if (ret) - goto out; - - if (is_modeset_prepare) - dp->panel_is_modeset = prepare; - -out: - mutex_unlock(&dp->panel_lock); - return ret; -} - static int analogix_dp_get_modes(struct drm_connector *connector) { struct analogix_dp_device *dp = to_dp(connector); struct edid *edid; int ret, num_modes = 0; - if (dp->plat_data->panel) { + if (dp->plat_data->panel) num_modes += drm_panel_get_modes(dp->plat_data->panel, connector); - } else { - ret = analogix_dp_prepare_panel(dp, true, false); - if (ret) { - DRM_ERROR("Failed to prepare panel (%d)\n", ret); + + if (!num_modes) { + ret = analogix_dp_phy_power_on(dp); + if (ret) return 0; - } - pm_runtime_get_sync(dp->dev); + if (dp->plat_data->panel) + analogix_dp_panel_prepare(dp); + edid = drm_get_edid(connector, &dp->aux.ddc); - pm_runtime_put(dp->dev); if (edid) { drm_connector_update_edid_property(&dp->connector, edid); @@ -1129,14 +1162,19 @@ static int analogix_dp_get_modes(struct drm_connector *connector) kfree(edid); } - ret = analogix_dp_prepare_panel(dp, false, false); - if (ret) - DRM_ERROR("Failed to unprepare panel (%d)\n", ret); + analogix_dp_phy_power_off(dp); } if (dp->plat_data->get_modes) num_modes += dp->plat_data->get_modes(dp->plat_data, connector); + if (num_modes > 0 && dp->plat_data->split_mode) { + struct drm_display_mode *mode; + + list_for_each_entry(mode, &connector->probed_modes, head) + dp->plat_data->convert_to_split_mode(mode); + } + return num_modes; } @@ -1182,38 +1220,76 @@ static const struct drm_connector_helper_funcs analogix_dp_connector_helper_func }; static enum drm_connector_status -analogix_dp_detect(struct drm_connector *connector, bool force) +analogix_dp_detect(struct analogix_dp_device *dp) { - struct analogix_dp_device *dp = to_dp(connector); enum drm_connector_status status = connector_status_disconnected; int ret; - if (dp->plat_data->panel) 
- return connector_status_connected; - - ret = analogix_dp_prepare_panel(dp, true, false); + ret = analogix_dp_phy_power_on(dp); if (ret) { - DRM_ERROR("Failed to prepare panel (%d)\n", ret); + extcon_set_state_sync(dp->extcon, EXTCON_DISP_DP, false); return connector_status_disconnected; } - if (!analogix_dp_detect_hpd(dp)) + if (dp->plat_data->panel) + analogix_dp_panel_prepare(dp); + + if (!analogix_dp_detect_hpd(dp)) { + ret = analogix_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate); + if (ret) { + dev_err(dp->dev, "failed to read max link rate\n"); + goto out; + } + + ret = analogix_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count); + if (ret) { + dev_err(dp->dev, "failed to read max lane count\n"); + goto out; + } + status = connector_status_connected; + } - ret = analogix_dp_prepare_panel(dp, false, false); - if (ret) - DRM_ERROR("Failed to unprepare panel (%d)\n", ret); +out: + analogix_dp_phy_power_off(dp); + + if (status == connector_status_connected) + extcon_set_state_sync(dp->extcon, EXTCON_DISP_DP, true); + else + extcon_set_state_sync(dp->extcon, EXTCON_DISP_DP, false); return status; } +static enum drm_connector_status +analogix_dp_connector_detect(struct drm_connector *connector, bool force) +{ + struct analogix_dp_device *dp = to_dp(connector); + + if (dp->plat_data->right && analogix_dp_detect(dp->plat_data->right) != connector_status_connected) + return connector_status_disconnected; + + return analogix_dp_detect(dp); +} + +static void analogix_dp_connector_force(struct drm_connector *connector) +{ + struct analogix_dp_device *dp = to_dp(connector); + + if (connector->status == connector_status_connected) + extcon_set_state_sync(dp->extcon, EXTCON_DISP_DP, true); + else + extcon_set_state_sync(dp->extcon, EXTCON_DISP_DP, false); +} + static const struct drm_connector_funcs analogix_dp_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, - .detect = analogix_dp_detect, + .detect = analogix_dp_connector_detect, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .force = analogix_dp_connector_force, }; static int analogix_dp_bridge_attach(struct drm_bridge *bridge, @@ -1224,10 +1300,8 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, struct drm_connector *connector = NULL; int ret = 0; - if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { - DRM_ERROR("Fix bridge driver to make connector optional!"); - return -EINVAL; - } + if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) + return 0; if (!bridge->encoder) { DRM_ERROR("Parent encoder object not found"); @@ -1240,6 +1314,8 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, ret = drm_connector_init(dp->drm_dev, connector, &analogix_dp_connector_funcs, + dp->plat_data->bridge ? 
+ dp->plat_data->bridge->type : DRM_MODE_CONNECTOR_eDP); if (ret) { DRM_ERROR("Failed to initialize connector with drm\n"); @@ -1268,6 +1344,14 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, return 0; } +static void analogix_dp_bridge_detach(struct drm_bridge *bridge) +{ + struct analogix_dp_device *dp = bridge->driver_private; + + if (dp->plat_data->detach) + dp->plat_data->detach(dp->plat_data, bridge); +} + static struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp, struct drm_atomic_state *state) @@ -1291,7 +1375,8 @@ static struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp, struct drm_atomic_state *state) { - struct drm_encoder *encoder = dp->encoder; + struct drm_bridge *bridge = &dp->bridge; + struct drm_encoder *encoder = bridge->encoder; struct drm_connector *connector; struct drm_connector_state *conn_state; @@ -1314,7 +1399,6 @@ analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; - int ret; crtc = analogix_dp_get_new_crtc(dp, old_state); if (!crtc) @@ -1325,27 +1409,20 @@ analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge, if (old_crtc_state && old_crtc_state->self_refresh_active) return; - ret = analogix_dp_prepare_panel(dp, true, true); - if (ret) - DRM_ERROR("failed to setup the panel ret = %d\n", ret); + if (dp->plat_data->panel) + analogix_dp_panel_prepare(dp); } static int analogix_dp_set_bridge(struct analogix_dp_device *dp) { int ret; - pm_runtime_get_sync(dp->dev); - - ret = clk_prepare_enable(dp->clock); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret); - goto out_dp_clk_pre; - } - if (dp->plat_data->power_on_start) dp->plat_data->power_on_start(dp->plat_data); - phy_power_on(dp->phy); + ret = analogix_dp_phy_power_on(dp); + if (ret) + return ret; ret = analogix_dp_init_dp(dp); if (ret) @@ -1363,28 +1440,35 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp) } ret = analogix_dp_commit(dp); - if (ret) { + if (ret < 0) { DRM_ERROR("dp commit error, ret = %d\n", ret); goto out_dp_init; } + if (dp->plat_data->panel) + drm_panel_enable(dp->plat_data->panel); + if (dp->plat_data->power_on_end) dp->plat_data->power_on_end(dp->plat_data); - enable_irq(dp->irq); return 0; out_dp_init: - phy_power_off(dp->phy); + analogix_dp_phy_power_off(dp); if (dp->plat_data->power_off) dp->plat_data->power_off(dp->plat_data); - clk_disable_unprepare(dp->clock); -out_dp_clk_pre: - pm_runtime_put_sync(dp->dev); - return ret; } +static void analogix_dp_modeset_retry_work_fn(struct work_struct *work) +{ + struct analogix_dp_device *dp = + container_of(work, typeof(*dp), modeset_retry_work); + + /* Send Hotplug uevent so userspace can reprobe */ + drm_kms_helper_hotplug_event(dp->bridge.dev); +} + static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) @@ -1423,12 +1507,14 @@ analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, usleep_range(10, 11); } dev_err(dp->dev, "too many times retry set bridge, give it up\n"); + + /* Schedule a Hotplug Uevent to userspace to start modeset */ + schedule_work(&dp->modeset_retry_work); } static void analogix_dp_bridge_disable(struct drm_bridge *bridge) { struct analogix_dp_device *dp = bridge->driver_private; - int ret; if (dp->dpms_mode != DRM_MODE_DPMS_ON) return; @@ -1440,21 +1526,17 @@ static void analogix_dp_bridge_disable(struct drm_bridge 
*bridge) } } - disable_irq(dp->irq); + if (!analogix_dp_get_plug_in_status(dp)) + analogix_dp_link_power_down(dp); if (dp->plat_data->power_off) dp->plat_data->power_off(dp->plat_data); analogix_dp_set_analog_power_down(dp, POWER_ALL, 1); - phy_power_off(dp->phy); - - clk_disable_unprepare(dp->clock); - - pm_runtime_put_sync(dp->dev); + analogix_dp_phy_power_off(dp); - ret = analogix_dp_prepare_panel(dp, false, true); - if (ret) - DRM_ERROR("failed to setup the panel ret = %d\n", ret); + if (dp->plat_data->panel) + analogix_dp_panel_unprepare(dp); dp->fast_train_enable = false; dp->psr_supported = false; @@ -1526,14 +1608,19 @@ analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge, static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *orig_mode, - const struct drm_display_mode *mode) + const struct drm_display_mode *adj_mode) { struct analogix_dp_device *dp = bridge->driver_private; struct drm_display_info *display_info = &dp->connector.display_info; struct video_info *video = &dp->video_info; + struct drm_display_mode *mode = &video->mode; struct device_node *dp_node = dp->dev->of_node; int vic; + drm_mode_copy(mode, adj_mode); + if (dp->plat_data->split_mode) + dp->plat_data->convert_to_origin_mode(mode); + /* Input video interlaces & hsync pol & vsync pol */ video->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); @@ -1542,16 +1629,12 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, /* Input video dynamic_range & colorimetry */ vic = drm_match_cea_mode(mode); if ((vic == 6) || (vic == 7) || (vic == 21) || (vic == 22) || - (vic == 2) || (vic == 3) || (vic == 17) || (vic == 18)) { + (vic == 2) || (vic == 3) || (vic == 17) || (vic == 18)) video->dynamic_range = CEA; - video->ycbcr_coeff = COLOR_YCBCR601; - } else if (vic) { + else if (vic) video->dynamic_range = CEA; - video->ycbcr_coeff = COLOR_YCBCR709; - } else { + else video->dynamic_range = VESA; - video->ycbcr_coeff = COLOR_YCBCR709; - } /* Input vide bpc and color_formats */ switch (display_info->bpc) { @@ -1571,12 +1654,16 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, video->color_depth = COLOR_8; break; } - if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB444) + if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB444) { video->color_space = COLOR_YCBCR444; - else if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB422) + video->ycbcr_coeff = COLOR_YCBCR709; + } else if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB422) { video->color_space = COLOR_YCBCR422; - else + video->ycbcr_coeff = COLOR_YCBCR709; + } else { video->color_space = COLOR_RGB; + video->ycbcr_coeff = COLOR_YCBCR601; + } /* * NOTE: those property parsing code is used for providing backward @@ -1601,6 +1688,56 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, video->interlaced = true; } +static bool analogix_dp_link_config_validate(u8 link_rate, u8 lane_count) +{ + switch (link_rate) { + case DP_LINK_BW_1_62: + case DP_LINK_BW_2_7: + case DP_LINK_BW_5_4: + break; + default: + return false; + } + + switch (lane_count) { + case 1: + case 2: + case 4: + break; + default: + return false; + } + + return true; +} + +static enum drm_mode_status +analogix_dp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct analogix_dp_device *dp = bridge->driver_private; + struct drm_display_mode 
m; + u32 max_link_rate, max_lane_count; + + drm_mode_copy(&m, mode); + + if (dp->plat_data->split_mode) + dp->plat_data->convert_to_origin_mode(&m); + + max_link_rate = min_t(u32, dp->video_info.max_link_rate, + dp->link_train.link_rate); + max_lane_count = min_t(u32, dp->video_info.max_lane_count, + dp->link_train.lane_count); + if (analogix_dp_link_config_validate(max_link_rate, max_lane_count) && + !analogix_dp_bandwidth_ok(dp, &m, + drm_dp_bw_code_to_link_rate(max_link_rate), + max_lane_count)) + return MODE_BAD; + + return MODE_OK; +} + static const struct drm_bridge_funcs analogix_dp_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -1611,29 +1748,30 @@ static const struct drm_bridge_funcs analogix_dp_bridge_funcs = { .atomic_post_disable = analogix_dp_bridge_atomic_post_disable, .mode_set = analogix_dp_bridge_mode_set, .attach = analogix_dp_bridge_attach, + .detach = analogix_dp_bridge_detach, + .mode_valid = analogix_dp_bridge_mode_valid, }; -static int analogix_dp_create_bridge(struct drm_device *drm_dev, - struct analogix_dp_device *dp) +static int analogix_dp_bridge_init(struct analogix_dp_device *dp) { - struct drm_bridge *bridge; + struct drm_bridge *bridge = &dp->bridge; int ret; - bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL); - if (!bridge) { - DRM_ERROR("failed to allocate for drm bridge\n"); - return -ENOMEM; + if (!dp->plat_data->left) { + ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0); + if (ret) { + DRM_ERROR("failed to attach drm bridge\n"); + return ret; + } } - dp->bridge = bridge; - - bridge->driver_private = dp; - bridge->funcs = &analogix_dp_bridge_funcs; + if (dp->plat_data->right) { + struct analogix_dp_device *secondary = dp->plat_data->right; - ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0); - if (ret) { - DRM_ERROR("failed to attach drm bridge\n"); - return -EINVAL; + ret = drm_bridge_attach(dp->encoder, &secondary->bridge, bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + return ret; } return 0; @@ -1643,10 +1781,11 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp) { struct device_node *dp_node = dp->dev->of_node; struct video_info *video_info = &dp->video_info; + struct property *prop; + int ret, len, num_lanes; switch (dp->plat_data->dev_type) { case RK3288_DP: - case RK3399_EDP: /* * Like Rk3288 DisplayPort TRM indicate that "Main link * containing 4 physical lanes of 2.7/1.62 Gbps/lane". 
@@ -1654,6 +1793,10 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp) video_info->max_link_rate = 0x0A; video_info->max_lane_count = 0x04; break; + case RK3399_EDP: + video_info->max_link_rate = 0x14; + video_info->max_lane_count = 0x04; + break; case EXYNOS_DP: /* * NOTE: those property parseing code is used for @@ -1666,6 +1809,35 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp) break; } + video_info->video_bist_enable = + of_property_read_bool(dp_node, "analogix,video-bist-enable"); + + prop = of_find_property(dp_node, "data-lanes", &len); + if (!prop) { + video_info->lane_map[0] = 0; + video_info->lane_map[1] = 1; + video_info->lane_map[2] = 2; + video_info->lane_map[3] = 3; + DRM_DEV_DEBUG(dp->dev, "failed to find data lane mapping, using default\n"); + return 0; + } + + num_lanes = len / sizeof(u32); + + if (num_lanes < 1 || num_lanes > 4 || num_lanes == 3) { + DRM_DEV_ERROR(dp->dev, "bad number of data lanes\n"); + return -EINVAL; + } + + video_info->max_lane_count = num_lanes; + + ret = of_property_read_u32_array(dp_node, "data-lanes", + video_info->lane_map, num_lanes); + if (ret) { + DRM_DEV_ERROR(dp->dev, "failed to read lane data\n"); + return ret; + } + return 0; } @@ -1673,20 +1845,96 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct analogix_dp_device *dp = to_dp(aux); + + return analogix_dp_transfer(dp, msg); +} + +int analogix_dp_audio_hw_params(struct analogix_dp_device *dp, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + switch (daifmt->fmt) { + case HDMI_SPDIF: + analogix_dp_audio_config_spdif(dp); + break; + case HDMI_I2S: + analogix_dp_audio_config_i2s(dp); + break; + default: + DRM_DEV_ERROR(dp->dev, "invalid daifmt %d\n", daifmt->fmt); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_audio_hw_params); + +void analogix_dp_audio_shutdown(struct analogix_dp_device *dp) +{ + analogix_dp_audio_disable(dp); +} +EXPORT_SYMBOL_GPL(analogix_dp_audio_shutdown); + +int analogix_dp_audio_startup(struct analogix_dp_device *dp) +{ + analogix_dp_audio_enable(dp); + + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_audio_startup); + +int analogix_dp_audio_get_eld(struct analogix_dp_device *dp, u8 *buf, size_t len) +{ + memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len)); + + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_audio_get_eld); + +static void analogix_dp_link_train_restore(struct analogix_dp_device *dp) +{ + u32 link_rate, lane_count; + u8 lane, spread; + + analogix_dp_get_link_bandwidth(dp, &link_rate); + analogix_dp_get_lane_count(dp, &lane_count); + drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD, &spread); + + dp->link_train.link_rate = link_rate; + dp->link_train.lane_count = lane_count; + dp->link_train.enhanced_framing = analogix_dp_get_enhanced_mode(dp); + dp->link_train.ssc = !!(spread & DP_MAX_DOWNSPREAD_0_5); + + for (lane = 0; lane < 4; lane++) + dp->link_train.training_lane[lane] = + analogix_dp_get_lane_link_training(dp, lane); +} + +int analogix_dp_loader_protect(struct analogix_dp_device *dp) +{ int ret; - pm_runtime_get_sync(dp->dev); + ret = analogix_dp_phy_power_on(dp); + if (ret) + return ret; - ret = analogix_dp_detect_hpd(dp); + dp->dpms_mode = DRM_MODE_DPMS_ON; + + analogix_dp_link_train_restore(dp); + + ret = analogix_dp_fast_link_train_detection(dp); if (ret) - goto out; + return ret; - ret = analogix_dp_transfer(dp, msg); -out: - pm_runtime_put(dp->dev); + if (analogix_dp_detect_sink_psr(dp)) { 
+ ret = analogix_dp_enable_sink_psr(dp); + if (ret) + return ret; + } - return ret; + return 0; } +EXPORT_SYMBOL_GPL(analogix_dp_loader_protect); struct analogix_dp_device * analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) @@ -1694,7 +1942,6 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) struct platform_device *pdev = to_platform_device(dev); struct analogix_dp_device *dp; struct resource *res; - unsigned int irq_flags; int ret; if (!plat_data) { @@ -1708,9 +1955,10 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; + INIT_WORK(&dp->modeset_retry_work, analogix_dp_modeset_retry_work_fn); mutex_init(&dp->panel_lock); - dp->panel_is_modeset = false; + dp->panel_is_prepared = false; /* * platform dp driver need containor_of the plat_data to get @@ -1739,21 +1987,19 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) } } - dp->clock = devm_clk_get(&pdev->dev, "dp"); - if (IS_ERR(dp->clock)) { - dev_err(&pdev->dev, "failed to get clock\n"); - return ERR_CAST(dp->clock); + ret = devm_clk_bulk_get_all(dev, &dp->clks); + if (ret < 0) { + dev_err(dev, "failed to get clocks %d\n", ret); + return ERR_PTR(ret); } - clk_prepare_enable(dp->clock); + dp->nr_clks = ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dp->reg_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dp->reg_base)) { - ret = PTR_ERR(dp->reg_base); - goto err_disable_clk; - } + if (IS_ERR(dp->reg_base)) + return ERR_CAST(dp->reg_base); dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd"); @@ -1765,46 +2011,55 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) if (IS_ERR(dp->hpd_gpiod)) { dev_err(dev, "error getting HDP GPIO: %ld\n", PTR_ERR(dp->hpd_gpiod)); - ret = PTR_ERR(dp->hpd_gpiod); - goto err_disable_clk; + return ERR_CAST(dp->hpd_gpiod); } if (dp->hpd_gpiod) { - /* - * Set up the hotplug GPIO from the device tree as an interrupt. - * Simply specifying a different interrupt in the device tree - * doesn't work since we handle hotplug rather differently when - * using a GPIO. We also need the actual GPIO specifier so - * that we can get the current state of the GPIO. 
- */ - dp->irq = gpiod_to_irq(dp->hpd_gpiod); - irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; - } else { - dp->irq = platform_get_irq(pdev, 0); - irq_flags = 0; + ret = devm_request_threaded_irq(dev, + gpiod_to_irq(dp->hpd_gpiod), + NULL, + analogix_dp_hpd_irq_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + "analogix-hpd", dp); + if (ret) { + dev_err(dev, "failed to request hpd IRQ: %d\n", ret); + return ERR_PTR(ret); + } } + dp->irq = platform_get_irq(pdev, 0); if (dp->irq == -ENXIO) { dev_err(&pdev->dev, "failed to get irq\n"); - ret = -ENODEV; - goto err_disable_clk; + return ERR_PTR(-ENODEV); } - ret = devm_request_threaded_irq(&pdev->dev, dp->irq, - analogix_dp_hardirq, + irq_set_status_flags(dp->irq, IRQ_NOAUTOEN); + ret = devm_request_threaded_irq(dev, dp->irq, NULL, analogix_dp_irq_thread, - irq_flags, "analogix-dp", dp); + IRQF_ONESHOT, dev_name(dev), dp); if (ret) { dev_err(&pdev->dev, "failed to request irq\n"); - goto err_disable_clk; + return ERR_PTR(ret); } - disable_irq(dp->irq); - return dp; + dp->extcon = devm_extcon_dev_allocate(dev, analogix_dp_cable); + if (IS_ERR(dp->extcon)) { + dev_err(dev, "failed to allocate extcon device\n"); + return ERR_CAST(dp->extcon); + } + + ret = devm_extcon_dev_register(dev, dp->extcon); + if (ret) { + dev_err(dev, "failed to register extcon device\n"); + return ERR_PTR(ret); + } + + dp->bridge.driver_private = dp; + dp->bridge.funcs = &analogix_dp_bridge_funcs; -err_disable_clk: - clk_disable_unprepare(dp->clock); - return ERR_PTR(ret); + return dp; } EXPORT_SYMBOL_GPL(analogix_dp_probe); @@ -1824,16 +2079,21 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev) return ret; pm_runtime_enable(dp->dev); + pm_runtime_get_sync(dp->dev); + analogix_dp_init(dp); - ret = analogix_dp_create_bridge(drm_dev, dp); + ret = analogix_dp_bridge_init(dp); if (ret) { - DRM_ERROR("failed to create bridge (%d)\n", ret); + DRM_ERROR("failed to init bridge (%d)\n", ret); goto err_disable_pm_runtime; } + enable_irq(dp->irq); + return 0; err_disable_pm_runtime: + pm_runtime_put(dp->dev); pm_runtime_disable(dp->dev); return ret; @@ -1842,47 +2102,50 @@ EXPORT_SYMBOL_GPL(analogix_dp_bind); void analogix_dp_unbind(struct analogix_dp_device *dp) { - analogix_dp_bridge_disable(dp->bridge); + disable_irq(dp->irq); dp->connector.funcs->destroy(&dp->connector); - - if (dp->plat_data->panel) { - if (drm_panel_unprepare(dp->plat_data->panel)) - DRM_ERROR("failed to turnoff the panel\n"); - } - drm_dp_aux_unregister(&dp->aux); + pm_runtime_put(dp->dev); pm_runtime_disable(dp->dev); } EXPORT_SYMBOL_GPL(analogix_dp_unbind); void analogix_dp_remove(struct analogix_dp_device *dp) { - clk_disable_unprepare(dp->clock); + cancel_work_sync(&dp->modeset_retry_work); } EXPORT_SYMBOL_GPL(analogix_dp_remove); -#ifdef CONFIG_PM int analogix_dp_suspend(struct analogix_dp_device *dp) { - clk_disable_unprepare(dp->clock); + pm_runtime_force_suspend(dp->dev); + return 0; } EXPORT_SYMBOL_GPL(analogix_dp_suspend); int analogix_dp_resume(struct analogix_dp_device *dp) { - int ret; - - ret = clk_prepare_enable(dp->clock); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret); - return ret; - } + pm_runtime_force_resume(dp->dev); + analogix_dp_init(dp); return 0; } EXPORT_SYMBOL_GPL(analogix_dp_resume); -#endif + +int analogix_dp_runtime_suspend(struct analogix_dp_device *dp) +{ + clk_bulk_disable_unprepare(dp->nr_clks, dp->clks); + + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_runtime_suspend); + 
+int analogix_dp_runtime_resume(struct analogix_dp_device *dp) +{ + return clk_bulk_prepare_enable(dp->nr_clks, dp->clks); +} +EXPORT_SYMBOL_GPL(analogix_dp_runtime_resume); int analogix_dp_start_crc(struct drm_connector *connector) { @@ -1909,3 +2172,4 @@ EXPORT_SYMBOL_GPL(analogix_dp_stop_crc); MODULE_AUTHOR("Jingoo Han "); MODULE_DESCRIPTION("Analogix DP Core Driver"); MODULE_LICENSE("GPL v2"); + diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h index c051502d7fbf..ebc9b51e39ad 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h @@ -10,6 +10,7 @@ #define _ANALOGIX_DP_CORE_H #include +#include #include #define DP_TIMEOUT_LOOP_COUNT 100 @@ -69,6 +70,7 @@ enum pattern_set { D10_2, TRAINING_PTN1, TRAINING_PTN2, + TRAINING_PTN3, DP_NONE }; @@ -120,15 +122,9 @@ enum analog_power_block { POWER_ALL }; -enum dp_irq_type { - DP_IRQ_TYPE_HP_CABLE_IN = BIT(0), - DP_IRQ_TYPE_HP_CABLE_OUT = BIT(1), - DP_IRQ_TYPE_HP_CHANGE = BIT(2), - DP_IRQ_TYPE_UNKNOWN = BIT(3), -}; - struct video_info { char *name; + struct drm_display_mode mode; bool h_sync_polarity; bool v_sync_polarity; @@ -141,6 +137,9 @@ struct video_info { int max_link_rate; enum link_lane_count_type max_lane_count; + u32 lane_map[4]; + + bool video_bist_enable; }; struct link_train { @@ -150,6 +149,8 @@ struct link_train { u8 link_rate; u8 lane_count; u8 training_lane[4]; + bool ssc; + bool enhanced_framing; enum link_training_state lt_state; }; @@ -159,9 +160,10 @@ struct analogix_dp_device { struct device *dev; struct drm_device *drm_dev; struct drm_connector connector; - struct drm_bridge *bridge; + struct drm_bridge bridge; struct drm_dp_aux aux; - struct clk *clock; + struct clk_bulk_data *clks; + int nr_clks; unsigned int irq; void __iomem *reg_base; @@ -173,17 +175,19 @@ struct analogix_dp_device { bool force_hpd; bool fast_train_enable; bool psr_supported; + struct work_struct modeset_retry_work; struct mutex panel_lock; - bool panel_is_modeset; + bool panel_is_prepared; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; struct analogix_dp_plat_data *plat_data; + struct extcon_dev *extcon; }; /* analogix_dp_reg.c */ void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable); void analogix_dp_stop_video(struct analogix_dp_device *dp); -void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable); void analogix_dp_init_analog_param(struct analogix_dp_device *dp); void analogix_dp_init_interrupt(struct analogix_dp_device *dp); void analogix_dp_reset(struct analogix_dp_device *dp); @@ -199,7 +203,6 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, int analogix_dp_init_analog_func(struct analogix_dp_device *dp); void analogix_dp_init_hpd(struct analogix_dp_device *dp); void analogix_dp_force_hpd(struct analogix_dp_device *dp); -enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp); void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp); void analogix_dp_reset_aux(struct analogix_dp_device *dp); void analogix_dp_init_aux(struct analogix_dp_device *dp); @@ -211,28 +214,11 @@ void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count); void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count); void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp, bool enable); +bool analogix_dp_get_enhanced_mode(struct analogix_dp_device *dp); void analogix_dp_set_training_pattern(struct analogix_dp_device 
*dp, enum pattern_set pattern); -void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp, - u32 level); -void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp, - u32 level); -void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp, - u32 level); -void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp, - u32 level); -void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp, - u32 training_lane); -void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp, - u32 training_lane); -void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp, - u32 training_lane); -void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp, - u32 training_lane); -u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp); -u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp); -u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp); -u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp); +void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp); +u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane); void analogix_dp_reset_macro(struct analogix_dp_device *dp); void analogix_dp_init_video(struct analogix_dp_device *dp); @@ -255,5 +241,16 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, struct dp_sdp *vsc, bool blocking); ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, struct drm_dp_aux_msg *msg); +void analogix_dp_set_video_format(struct analogix_dp_device *dp); +void analogix_dp_video_bist_enable(struct analogix_dp_device *dp); +bool analogix_dp_ssc_supported(struct analogix_dp_device *dp); +int analogix_dp_phy_power_on(struct analogix_dp_device *dp); +void analogix_dp_phy_power_off(struct analogix_dp_device *dp); +void analogix_dp_audio_config_spdif(struct analogix_dp_device *dp); +void analogix_dp_audio_config_i2s(struct analogix_dp_device *dp); +void analogix_dp_audio_enable(struct analogix_dp_device *dp); +void analogix_dp_audio_disable(struct analogix_dp_device *dp); +void analogix_dp_init(struct analogix_dp_device *dp); +void analogix_dp_irq_handler(struct analogix_dp_device *dp); #endif /* _ANALOGIX_DP_CORE_H */ diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index cab3f5c4e2fc..be64c0cf1146 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -11,30 +11,44 @@ #include #include #include +#include #include +#include #include "analogix_dp_core.h" #include "analogix_dp_reg.h" -#define COMMON_INT_MASK_1 0 -#define COMMON_INT_MASK_2 0 -#define COMMON_INT_MASK_3 0 -#define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG) -#define INT_STA_MASK INT_HPD +static void analogix_dp_write(struct analogix_dp_device *dp, u32 reg, u32 val) +{ + if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) { + readl(dp->reg_base); + writel(val, dp->reg_base + reg); + } + + writel(val, dp->reg_base + reg); +} + +static u32 analogix_dp_read(struct analogix_dp_device *dp, u32 reg) +{ + if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) + readl(dp->reg_base + reg); + + return readl(dp->reg_base + reg); +} void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable) { u32 reg; if (enable) { - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1); reg |= HDCP_VIDEO_MUTE; - writel(reg, 
dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg); } else { - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1); reg &= ~HDCP_VIDEO_MUTE; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg); } } @@ -42,23 +56,20 @@ void analogix_dp_stop_video(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1); reg &= ~VIDEO_EN; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg); } -void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable) +static void analogix_dp_set_lane_map(struct analogix_dp_device *dp) { - u32 reg; + struct video_info *video_info = &dp->video_info; + u32 i, reg = 0; - if (enable) - reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 | - LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3; - else - reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 | - LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0; + for (i = 0; i < video_info->max_lane_count; i++) + reg |= video_info->lane_map[i] << (2 * i); - writel(reg, dp->reg_base + ANALOGIX_DP_LANE_MAP); + analogix_dp_write(dp, ANALOGIX_DP_LANE_MAP, reg); } void analogix_dp_init_analog_param(struct analogix_dp_device *dp) @@ -66,53 +77,54 @@ void analogix_dp_init_analog_param(struct analogix_dp_device *dp) u32 reg; reg = TX_TERMINAL_CTRL_50_OHM; - writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_ANALOG_CTL_1, reg); reg = SEL_24M | TX_DVDD_BIT_1_0625V; - writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2); + analogix_dp_write(dp, ANALOGIX_DP_ANALOG_CTL_2, reg); if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) { reg = REF_CLK_24M; if (dp->plat_data->dev_type == RK3288_DP) reg ^= REF_CLK_MASK; - writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1); - writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2); - writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3); - writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4); - writel(0x22, dp->reg_base + ANALOGIX_DP_PLL_REG_5); + analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_1, reg); + analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_2, 0x99); + analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_3, 0x40); + analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_4, 0x58); + analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_5, 0x22); + analogix_dp_write(dp, ANALOGIX_DP_BIAS, 0x44); } reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO; - writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_ANALOG_CTL_3, reg); reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM | TX_CUR1_2X | TX_CUR_16_MA; - writel(reg, dp->reg_base + ANALOGIX_DP_PLL_FILTER_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_PLL_FILTER_CTL_1, reg); reg = CH3_AMP_400_MV | CH2_AMP_400_MV | CH1_AMP_400_MV | CH0_AMP_400_MV; - writel(reg, dp->reg_base + ANALOGIX_DP_TX_AMP_TUNING_CTL); + analogix_dp_write(dp, ANALOGIX_DP_TX_AMP_TUNING_CTL, reg); } void analogix_dp_init_interrupt(struct analogix_dp_device *dp) { /* Set interrupt pin assertion polarity as high */ - writel(INT_POL1 | INT_POL0, dp->reg_base + ANALOGIX_DP_INT_CTL); + analogix_dp_write(dp, ANALOGIX_DP_INT_CTL, INT_POL1 | INT_POL0); /* Clear pending regisers */ - writel(0xff, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); - writel(0x4f, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_2); - writel(0xe0, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_3); - 
writel(0xe7, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); - writel(0x63, dp->reg_base + ANALOGIX_DP_INT_STA); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_1, 0xff); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_2, 0x4f); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_3, 0xe0); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_4, 0xe7); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA, 0x63); /* 0:mask,1: unmask */ - writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1); - writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2); - writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3); - writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); - writel(0x00, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_1, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_2, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_3, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_4, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA_MASK, 0x00); } void analogix_dp_reset(struct analogix_dp_device *dp) @@ -130,65 +142,54 @@ void analogix_dp_reset(struct analogix_dp_device *dp) AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N | HDCP_FUNC_EN_N | SW_FUNC_EN_N; - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg); reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N | SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N; - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg); usleep_range(20, 30); - analogix_dp_lane_swap(dp, 0); - - writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); - writel(0x40, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); - writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); - writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + analogix_dp_set_lane_map(dp); - writel(0x0, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); - writel(0x0, dp->reg_base + ANALOGIX_DP_HDCP_CTL); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_1, 0x0); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_2, 0x40); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, 0x0); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, 0x0); - writel(0x5e, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_L); - writel(0x1a, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_H); + analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, 0x0); + analogix_dp_write(dp, ANALOGIX_DP_HDCP_CTL, 0x0); - writel(0x10, dp->reg_base + ANALOGIX_DP_LINK_DEBUG_CTL); + analogix_dp_write(dp, ANALOGIX_DP_LINK_DEBUG_CTL, 0x10); - writel(0x0, dp->reg_base + ANALOGIX_DP_PHY_TEST); + analogix_dp_write(dp, ANALOGIX_DP_PHY_TEST, 0x0); - writel(0x0, dp->reg_base + ANALOGIX_DP_VIDEO_FIFO_THRD); - writel(0x20, dp->reg_base + ANALOGIX_DP_AUDIO_MARGIN); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_FIFO_THRD, 0x0); + analogix_dp_write(dp, ANALOGIX_DP_AUDIO_MARGIN, 0x20); - writel(0x4, dp->reg_base + ANALOGIX_DP_M_VID_GEN_FILTER_TH); - writel(0x2, dp->reg_base + ANALOGIX_DP_M_AUD_GEN_FILTER_TH); + analogix_dp_write(dp, ANALOGIX_DP_M_VID_GEN_FILTER_TH, 0x4); + analogix_dp_write(dp, ANALOGIX_DP_M_AUD_GEN_FILTER_TH, 0x2); - writel(0x00000101, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, 0x00000101); } void analogix_dp_swreset(struct analogix_dp_device *dp) { - writel(RESET_DP_TX, dp->reg_base + ANALOGIX_DP_TX_SW_RESET); + analogix_dp_write(dp, ANALOGIX_DP_TX_SW_RESET, RESET_DP_TX); } void analogix_dp_config_interrupt(struct analogix_dp_device *dp) { - u32 reg; - /* 0: mask, 1: unmask */ - reg = COMMON_INT_MASK_1; - 
writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1); - - reg = COMMON_INT_MASK_2; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2); - - reg = COMMON_INT_MASK_3; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_1, 0); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_2, 0); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_3, 0); - reg = COMMON_INT_MASK_4; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); - - reg = INT_STA_MASK; - writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); + if (dp->force_hpd || dp->hpd_gpiod) + analogix_dp_mute_hpd_interrupt(dp); + else + analogix_dp_unmute_hpd_interrupt(dp); } void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp) @@ -196,13 +197,13 @@ void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp) u32 reg; /* 0: mask, 1: unmask */ - reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); - reg &= ~COMMON_INT_MASK_4; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_COMMON_INT_MASK_4); + reg &= ~HOTPLUG_CHG; + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_4, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA_MASK); - reg &= ~INT_STA_MASK; - writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); + reg = analogix_dp_read(dp, ANALOGIX_DP_INT_STA_MASK); + reg &= ~INT_HPD; + analogix_dp_write(dp, ANALOGIX_DP_INT_STA_MASK, reg); } void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp) @@ -210,18 +211,20 @@ void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp) u32 reg; /* 0: mask, 1: unmask */ - reg = COMMON_INT_MASK_4; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_COMMON_INT_MASK_4); + reg |= HOTPLUG_CHG; + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_4, reg); - reg = INT_STA_MASK; - writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); + reg = analogix_dp_read(dp, ANALOGIX_DP_INT_STA_MASK); + reg |= INT_HPD; + analogix_dp_write(dp, ANALOGIX_DP_INT_STA_MASK, reg); } enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL); + reg = analogix_dp_read(dp, ANALOGIX_DP_DEBUG_CTL); if (reg & PLL_LOCK) return PLL_LOCKED; else @@ -239,12 +242,12 @@ void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable) mask = RK_PLL_PD; } - reg = readl(dp->reg_base + pd_addr); + reg = analogix_dp_read(dp, pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + pd_addr); + analogix_dp_write(dp, pd_addr, reg); } void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, @@ -265,52 +268,54 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, else mask = AUX_PD; - reg = readl(dp->reg_base + phy_pd_addr); - if (enable) + reg = analogix_dp_read(dp, phy_pd_addr); + if (enable) { + reg &= ~(DP_INC_BG | DP_EXP_BG); reg |= mask; - else + } else { reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + } + analogix_dp_write(dp, phy_pd_addr, reg); break; case CH0_BLOCK: mask = CH0_PD; - reg = readl(dp->reg_base + phy_pd_addr); + reg = analogix_dp_read(dp, phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); break; case CH1_BLOCK: mask = CH1_PD; - reg = readl(dp->reg_base + phy_pd_addr); + reg = analogix_dp_read(dp, phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; 
- writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); break; case CH2_BLOCK: mask = CH2_PD; - reg = readl(dp->reg_base + phy_pd_addr); + reg = analogix_dp_read(dp, phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); break; case CH3_BLOCK: mask = CH3_PD; - reg = readl(dp->reg_base + phy_pd_addr); + reg = analogix_dp_read(dp, phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); break; case ANALOG_TOTAL: /* @@ -323,29 +328,29 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, else mask = DP_PHY_PD; - reg = readl(dp->reg_base + phy_pd_addr); + reg = analogix_dp_read(dp, phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) usleep_range(10, 15); break; case POWER_ALL: if (enable) { reg = DP_ALL_PD; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); } else { reg = DP_ALL_PD; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); usleep_range(10, 15); reg &= ~DP_INC_BG; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); usleep_range(10, 15); - writel(0x00, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, 0x00); } break; default: @@ -356,36 +361,24 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, int analogix_dp_init_analog_func(struct analogix_dp_device *dp) { u32 reg; - int timeout_loop = 0; analogix_dp_set_analog_power_down(dp, POWER_ALL, 0); reg = PLL_LOCK_CHG; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_1, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL); + reg = analogix_dp_read(dp, ANALOGIX_DP_DEBUG_CTL); reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL); - writel(reg, dp->reg_base + ANALOGIX_DP_DEBUG_CTL); + analogix_dp_write(dp, ANALOGIX_DP_DEBUG_CTL, reg); /* Power up PLL */ - if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { - analogix_dp_set_pll_power_down(dp, 0); - - while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { - timeout_loop++; - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "failed to get pll lock status\n"); - return -ETIMEDOUT; - } - usleep_range(10, 20); - } - } + analogix_dp_set_pll_power_down(dp, 0); /* Enable Serdes FIFO function and Link symbol clock domain module */ - reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2); reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N | AUX_FUNC_EN_N); - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg); return 0; } @@ -397,10 +390,10 @@ void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp) return; reg = HOTPLUG_CHG | HPD_LOST | PLUG; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_4, reg); reg = INT_HPD; - writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA, reg); } void analogix_dp_init_hpd(struct analogix_dp_device *dp) @@ -410,47 +403,47 @@ void analogix_dp_init_hpd(struct analogix_dp_device *dp) if (dp->hpd_gpiod) return; - analogix_dp_clear_hotplug_interrupts(dp); + 
analogix_dp_write(dp, ANALOGIX_DP_HPD_DEGLITCH_H, 0xbb); + analogix_dp_write(dp, ANALOGIX_DP_HPD_DEGLITCH_L, 0x80); - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3); reg &= ~(F_HPD | HPD_CTRL); - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg); } void analogix_dp_force_hpd(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); - reg = (F_HPD | HPD_CTRL); - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3); + reg |= (F_HPD | HPD_CTRL); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg); } -enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp) +static void analogix_dp_handle_hpd_event(struct analogix_dp_device *dp) { + bool changed = false; u32 reg; - if (dp->hpd_gpiod) { - reg = gpiod_get_value(dp->hpd_gpiod); - if (reg) - return DP_IRQ_TYPE_HP_CABLE_IN; - else - return DP_IRQ_TYPE_HP_CABLE_OUT; - } else { - /* Parse hotplug interrupt status register */ - reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); - - if (reg & PLUG) - return DP_IRQ_TYPE_HP_CABLE_IN; + reg = analogix_dp_read(dp, ANALOGIX_DP_INT_STA); + if (reg & INT_HPD) { + dev_info(dp->dev, "irq-hpd, it's being ignored for now\n"); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA, INT_HPD); + } - if (reg & HPD_LOST) - return DP_IRQ_TYPE_HP_CABLE_OUT; + reg = analogix_dp_read(dp, ANALOGIX_DP_COMMON_INT_STA_4); + if (reg & HOTPLUG_CHG) { + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_4, HOTPLUG_CHG); + changed = true; + } - if (reg & HOTPLUG_CHG) - return DP_IRQ_TYPE_HP_CHANGE; + if (changed) + drm_helper_hpd_irq_event(dp->drm_dev); +} - return DP_IRQ_TYPE_UNKNOWN; - } +void analogix_dp_irq_handler(struct analogix_dp_device *dp) +{ + analogix_dp_handle_hpd_event(dp); } void analogix_dp_reset_aux(struct analogix_dp_device *dp) @@ -458,9 +451,9 @@ void analogix_dp_reset_aux(struct analogix_dp_device *dp) u32 reg; /* Disable AUX channel module */ - reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2); reg |= AUX_FUNC_EN_N; - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg); } void analogix_dp_init_aux(struct analogix_dp_device *dp) @@ -469,7 +462,7 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp) /* Clear inerrupts related to AUX channel */ reg = RPLY_RECEIV | AUX_ERR; - writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA, reg); analogix_dp_set_analog_power_down(dp, AUX_BLOCK, true); usleep_range(10, 11); @@ -487,16 +480,17 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp) reg |= AUX_HW_RETRY_COUNT_SEL(0) | AUX_HW_RETRY_INTERVAL_600_MICROSECONDS; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL); + analogix_dp_write(dp, ANALOGIX_DP_AUX_HW_RETRY_CTL, reg); /* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */ reg = DEFER_CTRL_EN | DEFER_COUNT(1); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_DEFER_CTL); + analogix_dp_write(dp, ANALOGIX_DP_AUX_CH_DEFER_CTL, reg); /* Enable AUX channel module */ - reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + analogix_dp_enable_sw_function(dp); + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2); reg &= ~AUX_FUNC_EN_N; - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg); } int analogix_dp_get_plug_in_status(struct analogix_dp_device 
*dp) @@ -507,7 +501,7 @@ int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp) if (gpiod_get_value(dp->hpd_gpiod)) return 0; } else { - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3); if (reg & HPD_STATUS) return 0; } @@ -519,148 +513,193 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1); reg &= ~SW_FUNC_EN_N; - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg); } -int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp) +static void analogix_dp_ssc_enable(struct analogix_dp_device *dp) { - int reg; - int retval = 0; - int timeout_loop = 0; - - /* Enable AUX CH operation */ - reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); - reg |= AUX_EN; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); - - /* Is AUX CH command reply received? */ - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - while (!(reg & RPLY_RECEIV)) { - timeout_loop++; - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "AUX CH command reply failed!\n"); - return -ETIMEDOUT; - } - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - usleep_range(10, 11); - } - - /* Clear interrupt source for AUX CH command reply */ - writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA); - - /* Clear interrupt source for AUX CH access error */ - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - if (reg & AUX_ERR) { - writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA); - return -EREMOTEIO; - } - - /* Check AUX CH error access status */ - reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA); - if ((reg & AUX_STATUS_MASK) != 0) { - dev_err(dp->dev, "AUX CH error happens: %d\n\n", - reg & AUX_STATUS_MASK); - return -EREMOTEIO; - } + u32 reg; - return retval; + /* 4500ppm */ + writel(0x19, dp->reg_base + ANALOIGX_DP_SSC_REG); + /* + * To apply updated SSC parameters into SSC operation, + * firmware must disable and enable this bit. + */ + reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg |= SSC_FUNC_EN_N; + writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg &= ~SSC_FUNC_EN_N; + writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); } -int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp, - unsigned int reg_addr, - unsigned char data) +static void analogix_dp_ssc_disable(struct analogix_dp_device *dp) { u32 reg; - int i; - int retval; - - for (i = 0; i < 3; i++) { - /* Clear AUX CH data buffer */ - reg = BUF_CLR; - writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); - - /* Select DPCD device address */ - reg = AUX_ADDR_7_0(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); - reg = AUX_ADDR_15_8(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); - reg = AUX_ADDR_19_16(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); - - /* Write data buffer */ - reg = (unsigned int)data; - writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0); - - /* - * Set DisplayPort transaction and write 1 byte - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. 
- */ - reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); - /* Start AUX transaction */ - retval = analogix_dp_start_aux_transaction(dp); - if (retval == 0) - break; - - dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); - } + reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg |= SSC_FUNC_EN_N; + writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); +} - return retval; +bool analogix_dp_ssc_supported(struct analogix_dp_device *dp) +{ + /* Check if SSC is supported by both sides */ + return dp->plat_data->ssc && dp->link_train.ssc; } void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype) { - u32 reg; + u32 status; + int ret; - reg = bwtype; - if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62)) - writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET); + analogix_dp_write(dp, ANALOGIX_DP_LINK_BW_SET, bwtype); + + if (dp->phy) { + union phy_configure_opts phy_cfg = {0}; + + phy_cfg.dp.lanes = dp->link_train.lane_count; + phy_cfg.dp.link_rate = + drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100; + phy_cfg.dp.ssc = analogix_dp_ssc_supported(dp); + phy_cfg.dp.set_lanes = false; + phy_cfg.dp.set_rate = true; + phy_cfg.dp.set_voltages = false; + ret = phy_configure(dp->phy, &phy_cfg); + if (ret && ret != -EOPNOTSUPP) { + dev_err(dp->dev, "%s: phy_configure failed: %d\n", + __func__, ret); + return; + } + } else { + if (analogix_dp_ssc_supported(dp)) + analogix_dp_ssc_enable(dp); + else + analogix_dp_ssc_disable(dp); + } + + ret = readx_poll_timeout(analogix_dp_get_pll_lock_status, dp, status, + status != PLL_UNLOCKED, 120, + 120 * DP_TIMEOUT_LOOP_COUNT); + if (ret) { + dev_err(dp->dev, "Wait for pll lock failed %d\n", ret); + return; + } } void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_LINK_BW_SET); + reg = analogix_dp_read(dp, ANALOGIX_DP_LINK_BW_SET); *bwtype = reg; } void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count) { u32 reg; + int ret; reg = count; - writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET); + analogix_dp_write(dp, ANALOGIX_DP_LANE_COUNT_SET, reg); + + if (dp->phy) { + union phy_configure_opts phy_cfg = {0}; + + phy_cfg.dp.lanes = dp->link_train.lane_count; + phy_cfg.dp.set_lanes = true; + phy_cfg.dp.set_rate = false; + phy_cfg.dp.set_voltages = false; + ret = phy_configure(dp->phy, &phy_cfg); + if (ret && ret != -EOPNOTSUPP) { + dev_err(dp->dev, "%s: phy_configure() failed: %d\n", + __func__, ret); + return; + } + } } void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET); + reg = analogix_dp_read(dp, ANALOGIX_DP_LANE_COUNT_SET); *count = reg; } +void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp) +{ + u8 lane; + int ret; + + for (lane = 0; lane < dp->link_train.lane_count; lane++) + analogix_dp_write(dp, + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane, + dp->link_train.training_lane[lane]); + + if (dp->phy) { + union phy_configure_opts phy_cfg = {0}; + + for (lane = 0; lane < dp->link_train.lane_count; lane++) { + u8 training_lane = dp->link_train.training_lane[lane]; + u8 vs, pe; + + vs = (training_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + pe = (training_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + phy_cfg.dp.voltage[lane] = vs; + phy_cfg.dp.pre[lane] = pe; + } + + phy_cfg.dp.lanes = 
dp->link_train.lane_count; + phy_cfg.dp.link_rate = + drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100; + phy_cfg.dp.set_lanes = false; + phy_cfg.dp.set_rate = false; + phy_cfg.dp.set_voltages = true; + ret = phy_configure(dp->phy, &phy_cfg); + if (ret && ret != -EOPNOTSUPP) { + dev_err(dp->dev, "%s: phy_configure() failed: %d\n", + __func__, ret); + return; + } + } +} + +u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane) +{ + return analogix_dp_read(dp, + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane); +} + void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp, bool enable) { u32 reg; if (enable) { - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); reg |= ENHANCED; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); } else { - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); reg &= ~ENHANCED; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); } } +bool analogix_dp_get_enhanced_mode(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); + + return !!(reg & ENHANCED); +} + void analogix_dp_set_training_pattern(struct analogix_dp_device *dp, enum pattern_set pattern) { @@ -669,144 +708,48 @@ void analogix_dp_set_training_pattern(struct analogix_dp_device *dp, switch (pattern) { case PRBS7: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); break; case D10_2: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); break; case TRAINING_PTN1: reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); break; case TRAINING_PTN2: reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); + break; + case TRAINING_PTN3: + reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN3; + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); break; case DP_NONE: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_DISABLE | SW_TRAINING_PATTERN_SET_NORMAL; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); break; default: break; } } -void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp, - u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp, - u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp, - u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << 
PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp, - u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); -} - -u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp) -{ - return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); -} - -u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp) -{ - return readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); -} - -u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp) -{ - return readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); -} - -u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp) -{ - return readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); -} - void analogix_dp_reset_macro(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_PHY_TEST); + reg = analogix_dp_read(dp, ANALOGIX_DP_PHY_TEST); reg |= MACRO_RST; - writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST); + analogix_dp_write(dp, ANALOGIX_DP_PHY_TEST, reg); /* 10 us is the minimum reset time. 
*/ usleep_range(10, 20); reg &= ~MACRO_RST; - writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST); + analogix_dp_write(dp, ANALOGIX_DP_PHY_TEST, reg); } void analogix_dp_init_video(struct analogix_dp_device *dp) @@ -814,19 +757,19 @@ void analogix_dp_init_video(struct analogix_dp_device *dp) u32 reg; reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_1, reg); reg = 0x0; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_1, reg); reg = CHA_CRI(4) | CHA_CTRL; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_2, reg); reg = 0x0; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg); reg = VID_HRES_TH(2) | VID_VRES_TH(0); - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_8); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_8, reg); } void analogix_dp_set_video_color_format(struct analogix_dp_device *dp) @@ -837,36 +780,36 @@ void analogix_dp_set_video_color_format(struct analogix_dp_device *dp) reg = (dp->video_info.dynamic_range << IN_D_RANGE_SHIFT) | (dp->video_info.color_depth << IN_BPC_SHIFT) | (dp->video_info.color_space << IN_COLOR_F_SHIFT); - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_2); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_2, reg); /* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */ - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_3); reg &= ~IN_YC_COEFFI_MASK; if (dp->video_info.ycbcr_coeff) reg |= IN_YC_COEFFI_ITU709; else reg |= IN_YC_COEFFI_ITU601; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_3, reg); } int analogix_dp_is_slave_video_stream_clock_on(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1); - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_1, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_1); if (!(reg & DET_STA)) { dev_dbg(dp->dev, "Input stream clock not detected.\n"); return -EINVAL; } - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2); - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_2); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_2, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_2); dev_dbg(dp->dev, "wait SYS_CTL_2.\n"); if (reg & CHA_STA) { @@ -884,30 +827,30 @@ void analogix_dp_set_video_cr_mn(struct analogix_dp_device *dp, u32 reg; if (type == REGISTER_M) { - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); reg |= FIX_M_VID; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); reg = m_value & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_0); + analogix_dp_write(dp, ANALOGIX_DP_M_VID_0, reg); reg = (m_value >> 8) & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_1); + analogix_dp_write(dp, ANALOGIX_DP_M_VID_1, reg); reg = (m_value >> 16) & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_2); + analogix_dp_write(dp, ANALOGIX_DP_M_VID_2, reg); reg = n_value & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_0); + analogix_dp_write(dp, 
ANALOGIX_DP_N_VID_0, reg); reg = (n_value >> 8) & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_1); + analogix_dp_write(dp, ANALOGIX_DP_N_VID_1, reg); reg = (n_value >> 16) & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_2); + analogix_dp_write(dp, ANALOGIX_DP_N_VID_2, reg); } else { - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); reg &= ~FIX_M_VID; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); - writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_0); - writel(0x80, dp->reg_base + ANALOGIX_DP_N_VID_1); - writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_2); + analogix_dp_write(dp, ANALOGIX_DP_N_VID_0, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_N_VID_1, 0x80); + analogix_dp_write(dp, ANALOGIX_DP_N_VID_2, 0x00); } } @@ -916,13 +859,13 @@ void analogix_dp_set_video_timing_mode(struct analogix_dp_device *dp, u32 type) u32 reg; if (type == VIDEO_TIMING_FROM_CAPTURE) { - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); reg &= ~FORMAT_SEL; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); } else { - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); reg |= FORMAT_SEL; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); } } @@ -931,15 +874,15 @@ void analogix_dp_enable_video_master(struct analogix_dp_device *dp, bool enable) u32 reg; if (enable) { - reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + reg = analogix_dp_read(dp, ANALOGIX_DP_SOC_GENERAL_CTL); reg &= ~VIDEO_MODE_MASK; reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE; - writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, reg); } else { - reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + reg = analogix_dp_read(dp, ANALOGIX_DP_SOC_GENERAL_CTL); reg &= ~VIDEO_MODE_MASK; reg |= VIDEO_MODE_SLAVE_MODE; - writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, reg); } } @@ -947,19 +890,19 @@ void analogix_dp_start_video(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1); reg |= VIDEO_EN; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg); } int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3); if (!(reg & STRM_VALID)) { dev_dbg(dp->dev, "Input video stream is not detected.\n"); return -EINVAL; @@ -972,55 +915,55 @@ void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1); if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) { reg &= ~(RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N); } else { reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N); reg |= MASTER_VID_FUNC_EN_N; } - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + 
analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); reg &= ~INTERACE_SCAN_CFG; reg |= (dp->video_info.interlaced << 2); - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); reg &= ~VSYNC_POLARITY_CFG; reg |= (dp->video_info.v_sync_polarity << 1); - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); reg &= ~HSYNC_POLARITY_CFG; reg |= (dp->video_info.h_sync_polarity << 0); - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE; - writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, reg); } void analogix_dp_enable_scrambling(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + reg = analogix_dp_read(dp, ANALOGIX_DP_TRAINING_PTN_SET); reg &= ~SCRAMBLING_DISABLE; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); } void analogix_dp_disable_scrambling(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + reg = analogix_dp_read(dp, ANALOGIX_DP_TRAINING_PTN_SET); reg |= SCRAMBLING_DISABLE; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); } void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp) { - writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON); + analogix_dp_write(dp, ANALOGIX_DP_CRC_CON, PSR_VID_CRC_ENABLE); } static ssize_t analogix_dp_get_psr_status(struct analogix_dp_device *dp) @@ -1036,6 +979,24 @@ static ssize_t analogix_dp_get_psr_status(struct analogix_dp_device *dp) return status; } +static void analogix_dp_reuse_spd(struct analogix_dp_device *dp) +{ + u32 reg, val; + + switch (dp->plat_data->dev_type) { + // case RK3588_EDP: + // reg = ANALOGIX_DP_SPDIF_AUDIO_CTL_0; + // break; + default: + reg = ANALOGIX_DP_VIDEO_CTL_3; + break; + } + + val = analogix_dp_read(dp, reg); + val |= REUSE_SPD_EN; + analogix_dp_write(dp, reg, val); +} + int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, struct dp_sdp *vsc, bool blocking) { @@ -1044,44 +1005,47 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, ssize_t psr_status; /* don't send info frame */ - val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val = analogix_dp_read(dp, ANALOGIX_DP_PKT_SEND_CTL); val &= ~IF_EN; - writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, val); /* configure single frame update mode */ - writel(PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE, - dp->reg_base + ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL); + analogix_dp_write(dp, ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL, + PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE); /* configure VSC HB0~HB3 */ - writel(vsc->sdp_header.HB0, dp->reg_base + ANALOGIX_DP_SPD_HB0); - writel(vsc->sdp_header.HB1, dp->reg_base + ANALOGIX_DP_SPD_HB1); - writel(vsc->sdp_header.HB2, dp->reg_base + ANALOGIX_DP_SPD_HB2); - writel(vsc->sdp_header.HB3, dp->reg_base + 
ANALOGIX_DP_SPD_HB3); + analogix_dp_write(dp, ANALOGIX_DP_SPD_HB0, vsc->sdp_header.HB0); + analogix_dp_write(dp, ANALOGIX_DP_SPD_HB1, vsc->sdp_header.HB1); + analogix_dp_write(dp, ANALOGIX_DP_SPD_HB2, vsc->sdp_header.HB2); + analogix_dp_write(dp, ANALOGIX_DP_SPD_HB3, vsc->sdp_header.HB3); /* configure reused VSC PB0~PB3, magic number from vendor */ - writel(0x00, dp->reg_base + ANALOGIX_DP_SPD_PB0); - writel(0x16, dp->reg_base + ANALOGIX_DP_SPD_PB1); - writel(0xCE, dp->reg_base + ANALOGIX_DP_SPD_PB2); - writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3); + analogix_dp_write(dp, ANALOGIX_DP_SPD_PB0, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_SPD_PB1, 0x16); + analogix_dp_write(dp, ANALOGIX_DP_SPD_PB2, 0xCE); + analogix_dp_write(dp, ANALOGIX_DP_SPD_PB3, 0x5D); /* configure DB0 / DB1 values */ - writel(vsc->db[0], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0); - writel(vsc->db[1], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1); + analogix_dp_write(dp, ANALOGIX_DP_VSC_SHADOW_DB0, vsc->db[0]); + analogix_dp_write(dp, ANALOGIX_DP_VSC_SHADOW_DB1, vsc->db[1]); + + /* configure PB0 / PB1 values */ + analogix_dp_write(dp, ANALOGIX_DP_VSC_SHADOW_PB0, + vsc->db[1] ? 0x8d : 0x00); + analogix_dp_write(dp, ANALOGIX_DP_VSC_SHADOW_PB1, 0x00); /* set reuse spd inforframe */ - val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); - val |= REUSE_SPD_EN; - writel(val, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + analogix_dp_reuse_spd(dp); /* mark info frame update */ - val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val = analogix_dp_read(dp, ANALOGIX_DP_PKT_SEND_CTL); val = (val | IF_UP) & ~IF_EN; - writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, val); /* send info frame */ - val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val = analogix_dp_read(dp, ANALOGIX_DP_PKT_SEND_CTL); val |= IF_EN; - writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, val); if (!blocking) return 0; @@ -1108,11 +1072,46 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, return 0; } +int analogix_dp_phy_power_on(struct analogix_dp_device *dp) +{ + int ret; + + ret = phy_set_mode(dp->phy, PHY_MODE_DP); + if (ret) { + dev_err(dp->dev, "phy_set_mode failed: %d\n", ret); + return ret; + } + + ret = phy_power_on(dp->phy); + if (ret) { + dev_err(dp->dev, "phy_power_on failed: %d\n", ret); + return ret; + } + + return ret; +} + +void analogix_dp_phy_power_off(struct analogix_dp_device *dp) +{ + phy_power_off(dp->phy); +} + +enum { + AUX_STATUS_OK, + AUX_STATUS_NACK_ERROR, + AUX_STATUS_TIMEOUT_ERROR, + AUX_STATUS_UNKNOWN_ERROR, + AUX_STATUS_MUCH_DEFER_ERROR, + AUX_STATUS_TX_SHORT_ERROR, + AUX_STATUS_RX_SHORT_ERROR, + AUX_STATUS_NACK_WITHOUT_M_ERROR, + AUX_STATUS_I2C_NACK_ERROR +}; + ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, struct drm_dp_aux_msg *msg) { u32 reg; - u32 status_reg; u8 *buffer = msg->buffer; unsigned int i; int num_transferred = 0; @@ -1124,7 +1123,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, /* Clear AUX CH data buffer */ reg = BUF_CLR; - writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); + analogix_dp_write(dp, ANALOGIX_DP_BUFFER_DATA_CTL, reg); switch (msg->request & ~DP_AUX_I2C_MOT) { case DP_AUX_I2C_WRITE: @@ -1152,21 +1151,21 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, } reg |= AUX_LENGTH(msg->size); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_AUX_CH_CTL_1, reg); /* Select DPCD device address 
*/ reg = AUX_ADDR_7_0(msg->address); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); + analogix_dp_write(dp, ANALOGIX_DP_AUX_ADDR_7_0, reg); reg = AUX_ADDR_15_8(msg->address); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); + analogix_dp_write(dp, ANALOGIX_DP_AUX_ADDR_15_8, reg); reg = AUX_ADDR_19_16(msg->address); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); + analogix_dp_write(dp, ANALOGIX_DP_AUX_ADDR_19_16, reg); if (!(msg->request & DP_AUX_I2C_READ)) { for (i = 0; i < msg->size; i++) { reg = buffer[i]; - writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + - 4 * i); + analogix_dp_write(dp, ANALOGIX_DP_BUF_DATA_0 + 4 * i, + reg); num_transferred++; } } @@ -1178,7 +1177,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, if (msg->size < 1) reg |= ADDR_ONLY; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); + analogix_dp_write(dp, ANALOGIX_DP_AUX_CH_CTL_2, reg); ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2, reg, !(reg & AUX_EN), 25, 500 * 1000); @@ -1197,30 +1196,31 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, } /* Clear interrupt source for AUX CH command reply */ - writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA, RPLY_RECEIV); - /* Clear interrupt source for AUX CH access error */ - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - status_reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA); - if ((reg & AUX_ERR) || (status_reg & AUX_STATUS_MASK)) { - writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA); - - dev_warn(dp->dev, "AUX CH error happened: %#x (%d)\n", - status_reg & AUX_STATUS_MASK, !!(reg & AUX_ERR)); - goto aux_error; - } + reg = analogix_dp_read(dp, ANALOGIX_DP_AUX_CH_STA); + if ((reg & AUX_STATUS_MASK) == AUX_STATUS_TIMEOUT_ERROR) + return -ETIMEDOUT; if (msg->request & DP_AUX_I2C_READ) { + size_t buf_data_count; + + reg = analogix_dp_read(dp, ANALOGIX_DP_BUFFER_DATA_CTL); + buf_data_count = BUF_DATA_COUNT(reg); + + if (buf_data_count != msg->size) + return -EBUSY; + for (i = 0; i < msg->size; i++) { - reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + - 4 * i); + reg = analogix_dp_read(dp, ANALOGIX_DP_BUF_DATA_0 + + 4 * i); buffer[i] = (unsigned char)reg; num_transferred++; } } /* Check if Rx sends defer */ - reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM); + reg = analogix_dp_read(dp, ANALOGIX_DP_AUX_RX_COMM); if (reg == AUX_RX_COMM_AUX_DEFER) msg->reply = DP_AUX_NATIVE_REPLY_DEFER; else if (reg == AUX_RX_COMM_I2C_DEFER) @@ -1232,7 +1232,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ) msg->reply = DP_AUX_NATIVE_REPLY_ACK; - return num_transferred > 0 ? num_transferred : -EBUSY; + return (num_transferred == msg->size) ? 
num_transferred : -EBUSY; aux_error: /* if aux err happen, reset aux */ @@ -1240,3 +1240,127 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, return -EREMOTEIO; } + +void analogix_dp_set_video_format(struct analogix_dp_device *dp) +{ + struct video_info *video = &dp->video_info; + const struct drm_display_mode *mode = &video->mode; + unsigned int hsw, hfp, hbp, vsw, vfp, vbp; + + hsw = mode->hsync_end - mode->hsync_start; + hfp = mode->hsync_start - mode->hdisplay; + hbp = mode->htotal - mode->hsync_end; + vsw = mode->vsync_end - mode->vsync_start; + vfp = mode->vsync_start - mode->vdisplay; + vbp = mode->vtotal - mode->vsync_end; + + /* Set Video Format Parameters */ + analogix_dp_write(dp, ANALOGIX_DP_TOTAL_LINE_CFG_L, + TOTAL_LINE_CFG_L(mode->vtotal)); + analogix_dp_write(dp, ANALOGIX_DP_TOTAL_LINE_CFG_H, + TOTAL_LINE_CFG_H(mode->vtotal >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_LINE_CFG_L, + ACTIVE_LINE_CFG_L(mode->vdisplay)); + analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_LINE_CFG_H, + ACTIVE_LINE_CFG_H(mode->vdisplay >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_V_F_PORCH_CFG, + V_F_PORCH_CFG(vfp)); + analogix_dp_write(dp, ANALOGIX_DP_V_SYNC_WIDTH_CFG, + V_SYNC_WIDTH_CFG(vsw)); + analogix_dp_write(dp, ANALOGIX_DP_V_B_PORCH_CFG, + V_B_PORCH_CFG(vbp)); + analogix_dp_write(dp, ANALOGIX_DP_TOTAL_PIXEL_CFG_L, + TOTAL_PIXEL_CFG_L(mode->htotal)); + analogix_dp_write(dp, ANALOGIX_DP_TOTAL_PIXEL_CFG_H, + TOTAL_PIXEL_CFG_H(mode->htotal >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_PIXEL_CFG_L, + ACTIVE_PIXEL_CFG_L(mode->hdisplay)); + analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_PIXEL_CFG_H, + ACTIVE_PIXEL_CFG_H(mode->hdisplay >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_H_F_PORCH_CFG_L, + H_F_PORCH_CFG_L(hfp)); + analogix_dp_write(dp, ANALOGIX_DP_H_F_PORCH_CFG_H, + H_F_PORCH_CFG_H(hfp >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_H_SYNC_CFG_L, + H_SYNC_CFG_L(hsw)); + analogix_dp_write(dp, ANALOGIX_DP_H_SYNC_CFG_H, + H_SYNC_CFG_H(hsw >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_H_B_PORCH_CFG_L, + H_B_PORCH_CFG_L(hbp)); + analogix_dp_write(dp, ANALOGIX_DP_H_B_PORCH_CFG_H, + H_B_PORCH_CFG_H(hbp >> 8)); +} + +void analogix_dp_video_bist_enable(struct analogix_dp_device *dp) +{ + u32 reg; + + /* Enable Video BIST */ + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_4, BIST_EN); + + /* + * Note that if BIST_EN is set to 1, F_SEL must be cleared to 0 + * although video format information comes from registers set by user. 
+ */ + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); + reg &= ~FORMAT_SEL; + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); +} + +void analogix_dp_audio_config_i2s(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); + reg &= ~FIX_M_AUD; + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); + + reg = analogix_dp_read(dp, ANALOGIX_DP_I2S_CTRL); + reg |= I2S_EN; + analogix_dp_write(dp, ANALOGIX_DP_I2S_CTRL, reg); +} + +void analogix_dp_audio_config_spdif(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); + reg &= ~FIX_M_AUD; + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); + + reg = analogix_dp_read(dp, ANALOGIX_DP_SPDIF_AUDIO_CTL_0); + reg |= AUD_SPDIF_EN; + analogix_dp_write(dp, ANALOGIX_DP_SPDIF_AUDIO_CTL_0, reg); +} + +void analogix_dp_audio_enable(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1); + reg &= ~(AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg); + + reg = analogix_dp_read(dp, ANALOGIX_DP_AUD_CTL); + reg |= MISC_CTRL_RESET | DP_AUDIO_EN; + analogix_dp_write(dp, ANALOGIX_DP_AUD_CTL, reg); +} + +void analogix_dp_audio_disable(struct analogix_dp_device *dp) +{ + u32 reg; + + analogix_dp_write(dp, ANALOGIX_DP_AUD_CTL, 0); + + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1); + reg |= AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N; + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg); +} + +void analogix_dp_init(struct analogix_dp_device *dp) +{ + analogix_dp_init_interrupt(dp); + analogix_dp_config_interrupt(dp); + analogix_dp_init_hpd(dp); + analogix_dp_init_aux(dp); +} diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h index e284ee8da58b..7658ff853dcc 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h @@ -15,9 +15,27 @@ #define ANALOGIX_DP_VIDEO_CTL_1 0x20 #define ANALOGIX_DP_VIDEO_CTL_2 0x24 #define ANALOGIX_DP_VIDEO_CTL_3 0x28 +#define ANALOGIX_DP_VIDEO_CTL_4 0x2C #define ANALOGIX_DP_VIDEO_CTL_8 0x3C #define ANALOGIX_DP_VIDEO_CTL_10 0x44 +#define ANALOGIX_DP_TOTAL_LINE_CFG_L 0x48 +#define ANALOGIX_DP_TOTAL_LINE_CFG_H 0x4C +#define ANALOGIX_DP_ACTIVE_LINE_CFG_L 0x50 +#define ANALOGIX_DP_ACTIVE_LINE_CFG_H 0x54 +#define ANALOGIX_DP_V_F_PORCH_CFG 0x58 +#define ANALOGIX_DP_V_SYNC_WIDTH_CFG 0x5C +#define ANALOGIX_DP_V_B_PORCH_CFG 0x60 +#define ANALOGIX_DP_TOTAL_PIXEL_CFG_L 0x64 +#define ANALOGIX_DP_TOTAL_PIXEL_CFG_H 0x68 +#define ANALOGIX_DP_ACTIVE_PIXEL_CFG_L 0x6C +#define ANALOGIX_DP_ACTIVE_PIXEL_CFG_H 0x70 +#define ANALOGIX_DP_H_F_PORCH_CFG_L 0x74 +#define ANALOGIX_DP_H_F_PORCH_CFG_H 0x78 +#define ANALOGIX_DP_H_SYNC_CFG_L 0x7C +#define ANALOGIX_DP_H_SYNC_CFG_H 0x80 +#define ANALOGIX_DP_H_B_PORCH_CFG_L 0x84 +#define ANALOGIX_DP_H_B_PORCH_CFG_H 0x88 #define ANALOGIX_DP_SPDIF_AUDIO_CTL_0 0xD8 @@ -27,6 +45,8 @@ #define ANALOGIX_DP_PLL_REG_4 0x9ec #define ANALOGIX_DP_PLL_REG_5 0xa00 +#define ANALOIGX_DP_SSC_REG 0x104 +#define ANALOGIX_DP_BIAS 0x124 #define ANALOGIX_DP_PD 0x12c #define ANALOGIX_DP_IF_TYPE 0x244 @@ -43,6 +63,8 @@ #define ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL 0x318 #define ANALOGIX_DP_VSC_SHADOW_DB0 0x31C #define ANALOGIX_DP_VSC_SHADOW_DB1 0x320 +#define ANALOGIX_DP_VSC_SHADOW_PB0 0x33C +#define ANALOGIX_DP_VSC_SHADOW_PB1 0x340 #define ANALOGIX_DP_LANE_MAP 0x35C @@ -70,7 +92,7 @@ #define ANALOGIX_DP_SYS_CTL_2 0x604 #define ANALOGIX_DP_SYS_CTL_3 0x608 
#define ANALOGIX_DP_SYS_CTL_4 0x60C - +#define ANALOGIX_DP_AUD_CTL 0x618 #define ANALOGIX_DP_PKT_SEND_CTL 0x640 #define ANALOGIX_DP_HDCP_CTL 0x648 @@ -116,8 +138,9 @@ #define ANALOGIX_DP_BUF_DATA_0 0x7C0 #define ANALOGIX_DP_SOC_GENERAL_CTL 0x800 - +#define ANALOGIX_DP_AUD_CHANNEL_CTL 0x834 #define ANALOGIX_DP_CRC_CON 0x890 +#define ANALOGIX_DP_I2S_CTRL 0x9C8 /* ANALOGIX_DP_TX_SW_RESET */ #define RESET_DP_TX (0x1 << 0) @@ -171,6 +194,11 @@ #define VID_CHK_UPDATE_TYPE_0 (0x0 << 4) #define REUSE_SPD_EN (0x1 << 3) +/* ANALOGIX_DP_VIDEO_CTL_4 */ +#define BIST_EN (0x1 << 3) +#define BIST_WIDTH(x) (((x) & 0x1) << 2) +#define BIST_TYPE(x) (((x) & 0x3) << 0) + /* ANALOGIX_DP_VIDEO_CTL_8 */ #define VID_HRES_TH(x) (((x) & 0xf) << 4) #define VID_VRES_TH(x) (((x) & 0xf) << 0) @@ -181,6 +209,60 @@ #define VSYNC_POLARITY_CFG (0x1 << 1) #define HSYNC_POLARITY_CFG (0x1 << 0) +/* ANALOGIX_DP_TOTAL_LINE_CFG_L */ +#define TOTAL_LINE_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_TOTAL_LINE_CFG_H */ +#define TOTAL_LINE_CFG_H(x) (((x) & 0xf) << 0) + +/* ANALOGIX_DP_ACTIVE_LINE_CFG_L */ +#define ACTIVE_LINE_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_ACTIVE_LINE_CFG_H */ +#define ACTIVE_LINE_CFG_H(x) (((x) & 0xf) << 0) + +/* ANALOGIX_DP_V_F_PORCH_CFG */ +#define V_F_PORCH_CFG(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_V_SYNC_WIDTH_CFG */ +#define V_SYNC_WIDTH_CFG(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_V_B_PORCH_CFG */ +#define V_B_PORCH_CFG(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_TOTAL_PIXEL_CFG_L */ +#define TOTAL_PIXEL_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_TOTAL_PIXEL_CFG_H */ +#define TOTAL_PIXEL_CFG_H(x) (((x) & 0x3f) << 0) + +/* ANALOGIX_DP_ACTIVE_PIXEL_CFG_L */ +#define ACTIVE_PIXEL_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_ACTIVE_PIXEL_CFG_H */ +#define ACTIVE_PIXEL_CFG_H(x) (((x) & 0x3f) << 0) + +/* ANALOGIX_DP_H_F_PORCH_CFG_L */ +#define H_F_PORCH_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_H_F_PORCH_CFG_H */ +#define H_F_PORCH_CFG_H(x) (((x) & 0xf) << 0) + +/* ANALOGIX_DP_H_SYNC_CFG_L */ +#define H_SYNC_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_H_SYNC_CFG_H */ +#define H_SYNC_CFG_H(x) (((x) & 0xf) << 0) + +/* ANALOGIX_DP_H_B_PORCH_CFG_L */ +#define H_B_PORCH_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_H_B_PORCH_CFG_H */ +#define H_B_PORCH_CFG_H(x) (((x) & 0xf) << 0) + +/* ANALOGIX_DP_SPDIF_AUDIO_CTL_0 */ +#define AUD_SPDIF_EN (0x1 << 7) + /* ANALOGIX_DP_PLL_REG_1 */ #define REF_CLK_24M (0x1 << 0) #define REF_CLK_27M (0x0 << 0) @@ -309,6 +391,10 @@ #define FIX_M_VID (0x1 << 2) #define M_VID_UPDATE_CTRL (0x3 << 0) +/* ANALOGIX_DP_AUD_CTL */ +#define MISC_CTRL_RESET (0x1 << 4) +#define DP_AUDIO_EN (0x1 << 0) + /* ANALOGIX_DP_TRAINING_PTN_SET */ #define SCRAMBLER_TYPE (0x1 << 9) #define HW_LINK_TRAINING_PATTERN (0x1 << 8) @@ -319,6 +405,7 @@ #define LINK_QUAL_PATTERN_SET_D10_2 (0x1 << 2) #define LINK_QUAL_PATTERN_SET_DISABLE (0x0 << 2) #define SW_TRAINING_PATTERN_SET_MASK (0x3 << 0) +#define SW_TRAINING_PATTERN_SET_PTN3 (0x3 << 0) #define SW_TRAINING_PATTERN_SET_PTN2 (0x2 << 0) #define SW_TRAINING_PATTERN_SET_PTN1 (0x1 << 0) #define SW_TRAINING_PATTERN_SET_NORMAL (0x0 << 0) @@ -406,6 +493,11 @@ #define VIDEO_MODE_SLAVE_MODE (0x1 << 0) #define VIDEO_MODE_MASTER_MODE (0x0 << 0) +/* ANALOGIX_DP_AUD_CHANNEL_CTL */ +#define AUD_CHANNEL_COUNT_6 (0x5 << 0) +#define AUD_CHANNEL_COUNT_4 (0x3 << 0) +#define AUD_CHANNEL_COUNT_2 (0x1 << 0) + /* ANALOGIX_DP_PKT_SEND_CTL */ #define IF_UP (0x1 << 4) #define IF_EN (0x1 << 0) @@ -414,4 +506,7 @@ #define PSR_VID_CRC_FLUSH (0x1 << 2) #define 
PSR_VID_CRC_ENABLE (0x1 << 0) +/* ANALOGIX_DP_I2S_CTRL */ +#define I2S_EN (0x1 << 4) + #endif /* _ANALOGIX_DP_REG_H */ diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index 544a47335cac..d24f5b90feab 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -11,7 +11,9 @@ #include #include #include +#include +#include #include #include @@ -20,6 +22,8 @@ struct display_connector { struct gpio_desc *hpd_gpio; int hpd_irq; + + struct regulator *dp_pwr; }; static inline struct display_connector * @@ -84,10 +88,95 @@ static struct edid *display_connector_get_edid(struct drm_bridge *bridge, return drm_get_edid(connector, conn->bridge.ddc); } +/* + * Since this bridge is tied to the connector, it acts like a passthrough, + * so concerning the output bus formats, either pass the bus formats from the + * previous bridge or return fallback data like done in the bridge function: + * drm_atomic_bridge_chain_select_bus_fmts(). + * This supports negotiation if the bridge chain has all bits in place. + */ +static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts) +{ + struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge); + struct drm_bridge_state *prev_bridge_state; + + if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) { + struct drm_connector *conn = conn_state->connector; + u32 *out_bus_fmts; + + *num_output_fmts = 1; + out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL); + if (!out_bus_fmts) + return NULL; + + if (conn->display_info.num_bus_formats && + conn->display_info.bus_formats) + out_bus_fmts[0] = conn->display_info.bus_formats[0]; + else + out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED; + + return out_bus_fmts; + } + + prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, + prev_bridge); + + return prev_bridge->funcs->atomic_get_output_bus_fmts(prev_bridge, prev_bridge_state, + crtc_state, conn_state, + num_output_fmts); +} + +/* + * Since this bridge is tied to the connector, it acts like a passthrough, + * so concerning the input bus formats, either pass the bus formats from the + * previous bridge or MEDIA_BUS_FMT_FIXED (like select_bus_fmt_recursive()) + * when atomic_get_input_bus_fmts is not supported. + * This supports negotiation if the bridge chain has all bits in place. 
+ */ +static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge); + struct drm_bridge_state *prev_bridge_state; + + if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) { + u32 *in_bus_fmts; + + *num_input_fmts = 1; + in_bus_fmts = kmalloc(sizeof(*in_bus_fmts), GFP_KERNEL); + if (!in_bus_fmts) + return NULL; + + in_bus_fmts[0] = MEDIA_BUS_FMT_FIXED; + + return in_bus_fmts; + } + + prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, + prev_bridge); + + return prev_bridge->funcs->atomic_get_input_bus_fmts(prev_bridge, prev_bridge_state, + crtc_state, conn_state, output_fmt, + num_input_fmts); +} + static const struct drm_bridge_funcs display_connector_bridge_funcs = { .attach = display_connector_attach, .detect = display_connector_detect, .get_edid = display_connector_get_edid, + .atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts, + .atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, }; static irqreturn_t display_connector_hpd_irq(int irq, void *arg) @@ -172,11 +261,12 @@ static int display_connector_probe(struct platform_device *pdev) of_property_read_string(pdev->dev.of_node, "label", &label); /* - * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide + * Get the HPD GPIO for DVI, HDMI and DP connectors. If the GPIO can provide * edge interrupts, register an interrupt handler. */ if (type == DRM_MODE_CONNECTOR_DVII || - type == DRM_MODE_CONNECTOR_HDMIA) { + type == DRM_MODE_CONNECTOR_HDMIA || + type == DRM_MODE_CONNECTOR_DisplayPort) { conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); if (IS_ERR(conn->hpd_gpio)) { @@ -223,6 +313,38 @@ static int display_connector_probe(struct platform_device *pdev) } } + /* Get the DP PWR for DP connector. 
*/ + if (type == DRM_MODE_CONNECTOR_DisplayPort) { + int ret; + + conn->dp_pwr = devm_regulator_get_optional(&pdev->dev, "dp-pwr"); + + if (IS_ERR(conn->dp_pwr)) { + ret = PTR_ERR(conn->dp_pwr); + + switch (ret) { + case -ENODEV: + conn->dp_pwr = NULL; + break; + + case -EPROBE_DEFER: + return -EPROBE_DEFER; + + default: + dev_err(&pdev->dev, "failed to get DP PWR regulator: %d\n", ret); + return ret; + } + } + + if (conn->dp_pwr) { + ret = regulator_enable(conn->dp_pwr); + if (ret) { + dev_err(&pdev->dev, "failed to enable DP PWR regulator: %d\n", ret); + return ret; + } + } + } + conn->bridge.funcs = &display_connector_bridge_funcs; conn->bridge.of_node = pdev->dev.of_node; @@ -251,6 +373,9 @@ static int display_connector_remove(struct platform_device *pdev) { struct display_connector *conn = platform_get_drvdata(pdev); + if (conn->dp_pwr) + regulator_disable(conn->dp_pwr); + drm_bridge_remove(&conn->bridge); if (!IS_ERR(conn->bridge.ddc)) @@ -275,6 +400,9 @@ static const struct of_device_id display_connector_match[] = { }, { .compatible = "vga-connector", .data = (void *)DRM_MODE_CONNECTOR_VGA, + }, { + .compatible = "dp-connector", + .data = (void *)DRM_MODE_CONNECTOR_DisplayPort, }, {}, }; diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 1dcc28a4d853..f95a75310258 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -868,14 +868,8 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_mode *mode) { struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode); - struct lt9611 *lt9611 = bridge_to_lt9611(bridge); - if (!lt9611_mode) - return MODE_BAD; - else if (lt9611_mode->intfs > 1 && !lt9611->dsi1) - return MODE_PANEL; - else - return MODE_OK; + return lt9611_mode ? MODE_OK : MODE_BAD; } static void lt9611_bridge_pre_enable(struct drm_bridge *bridge) diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index b68d33598158..215da60e818f 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -196,7 +196,7 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps) u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp, - dsi->lanes * 8ULL * NSEC_PER_SEC); + dsi->lanes * 8 * NSEC_PER_SEC); } /* diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 89558e581530..33a302a5432c 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -24,10 +24,12 @@ #include #include #include +#include #include #include #include +#include