1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * (C) Copyright 2014 - 2015 Xilinx, Inc.
4 * Michal Simek <michal.simek@xilinx.com>
5 */
6
7 #include <common.h>
8 #include <asm/arch/hardware.h>
9 #include <asm/arch/sys_proto.h>
10 #include <asm/armv8/mmu.h>
11 #include <asm/io.h>
12 #include <zynqmp_firmware.h>
13
/*
 * Silicon version field in the CSU version register, bits [15:12].
 * Note: named ZYNQMP_* to match the users in this file
 * (zynqmp_get_silicon_version_secure()).
 */
#define ZYNQMP_SILICON_VER_MASK		0xF000
#define ZYNQMP_SILICON_VER_SHIFT	12
16
17 DECLARE_GLOBAL_DATA_PTR;
18
19 /*
20 * Number of filled static entries and also the first empty
21 * slot in zynqmp_mem_map.
22 */
23 #define ZYNQMP_MEM_MAP_USED 4
24
25 #if !defined(CONFIG_ZYNQMP_NO_DDR)
26 #define DRAM_BANKS CONFIG_NR_DRAM_BANKS
27 #else
28 #define DRAM_BANKS 0
29 #endif
30
31 #if defined(CONFIG_DEFINE_TCM_OCM_MMAP)
32 #define TCM_MAP 1
33 #else
34 #define TCM_MAP 0
35 #endif
36
37 /* +1 is end of list which needs to be empty */
38 #define ZYNQMP_MEM_MAP_MAX (ZYNQMP_MEM_MAP_USED + DRAM_BANKS + TCM_MAP + 1)
39
/*
 * Static MMU regions: four identity-mapped (virt == phys) windows, all
 * device-nGnRnE, non-shareable and non-executable (PXN | UXN). DDR and
 * the optional TCM/OCM window are appended at runtime by mem_map_fill();
 * the array is sized so the last slot stays zeroed as the end-of-list
 * terminator.
 */
static struct mm_region zynqmp_mem_map[ZYNQMP_MEM_MAP_MAX] = {
	{
		/* 0x8000_0000 - 0xEFFF_FFFF (1.75 GiB) */
		.virt = 0x80000000UL,
		.phys = 0x80000000UL,
		.size = 0x70000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* 0xF800_0000 - 0xFFDF_FFFF (126 MiB); leaves the
		 * 2 MiB at 0xFFE0_0000 for the runtime TCM entry
		 */
		.virt = 0xf8000000UL,
		.phys = 0xf8000000UL,
		.size = 0x07e00000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* 0x4_0000_0000 - 0x7_FFFF_FFFF (16 GiB) */
		.virt = 0x400000000UL,
		.phys = 0x400000000UL,
		.size = 0x400000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* 0x10_0000_0000 - 0xFF_FFFF_FFFF (960 GiB) */
		.virt = 0x1000000000UL,
		.phys = 0x1000000000UL,
		.size = 0xf000000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}
};
71
mem_map_fill(void)72 void mem_map_fill(void)
73 {
74 int banks = ZYNQMP_MEM_MAP_USED;
75
76 #if defined(CONFIG_DEFINE_TCM_OCM_MMAP)
77 zynqmp_mem_map[banks].virt = 0xffe00000UL;
78 zynqmp_mem_map[banks].phys = 0xffe00000UL;
79 zynqmp_mem_map[banks].size = 0x00200000UL;
80 zynqmp_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
81 PTE_BLOCK_INNER_SHARE;
82 banks = banks + 1;
83 #endif
84
85 #if !defined(CONFIG_ZYNQMP_NO_DDR)
86 for (int i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
87 /* Zero size means no more DDR that's this is end */
88 if (!gd->bd->bi_dram[i].size)
89 break;
90
91 zynqmp_mem_map[banks].virt = gd->bd->bi_dram[i].start;
92 zynqmp_mem_map[banks].phys = gd->bd->bi_dram[i].start;
93 zynqmp_mem_map[banks].size = gd->bd->bi_dram[i].size;
94 zynqmp_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
95 PTE_BLOCK_INNER_SHARE;
96 banks = banks + 1;
97 }
98 #endif
99 }
100
101 struct mm_region *mem_map = zynqmp_mem_map;
102
/*
 * get_page_table_size() - Amount of memory to reserve for MMU tables.
 *
 * Return: 0x14000 bytes (80 KiB) for the translation tables, overriding
 * the generic default.
 */
u64 get_page_table_size(void)
{
	return 0x14000;
}
107
#if defined(CONFIG_SYS_MEM_RSVD_FOR_MMU) || defined(CONFIG_DEFINE_TCM_OCM_MMAP)
/*
 * tcm_init() - Initialize the tightly-coupled memory and zero it.
 * @mode: TCM configuration mode, passed straight to initialize_tcm()
 *	  (e.g. TCM_LOCK — see sys_proto.h)
 *
 * Destructive: after initialize_tcm() the whole ZYNQMP_TCM_SIZE region
 * is cleared with memset(), hence the printed warning.
 */
void tcm_init(u8 mode)
{
	puts("WARNING: Initializing TCM overwrites TCM content\n");
	initialize_tcm(mode);
	memset((void *)ZYNQMP_TCM_BASE_ADDR, 0, ZYNQMP_TCM_SIZE);
}
#endif
116
#ifdef CONFIG_SYS_MEM_RSVD_FOR_MMU
/*
 * reserve_mmu() - Place the MMU translation tables in TCM.
 *
 * Initializes (and wipes) the TCM in locked mode, then points the page
 * table allocation (gd->arch.tlb_addr/tlb_size) at the TCM base.
 *
 * Return: always 0.
 */
int reserve_mmu(void)
{
	tcm_init(TCM_LOCK);
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->arch.tlb_addr = ZYNQMP_TCM_BASE_ADDR;

	return 0;
}
#endif
127
zynqmp_get_silicon_version_secure(void)128 static unsigned int zynqmp_get_silicon_version_secure(void)
129 {
130 u32 ver;
131
132 ver = readl(&csu_base->version);
133 ver &= ZYNQMP_SILICON_VER_MASK;
134 ver >>= ZYNQMP_SILICON_VER_SHIFT;
135
136 return ver;
137 }
138
zynqmp_get_silicon_version(void)139 unsigned int zynqmp_get_silicon_version(void)
140 {
141 if (current_el() == 3)
142 return zynqmp_get_silicon_version_secure();
143
144 gd->cpu_clk = get_tbclk();
145
146 switch (gd->cpu_clk) {
147 case 50000000:
148 return ZYNQMP_CSU_VERSION_QEMU;
149 }
150
151 return ZYNQMP_CSU_VERSION_SILICON;
152 }
153
zynqmp_mmio_rawwrite(const u32 address,const u32 mask,const u32 value)154 static int zynqmp_mmio_rawwrite(const u32 address,
155 const u32 mask,
156 const u32 value)
157 {
158 u32 data;
159 u32 value_local = value;
160 int ret;
161
162 ret = zynqmp_mmio_read(address, &data);
163 if (ret)
164 return ret;
165
166 data &= ~mask;
167 value_local &= mask;
168 value_local |= data;
169 writel(value_local, (ulong)address);
170 return 0;
171 }
172
/*
 * zynqmp_mmio_rawread() - Direct register read into @value.
 *
 * Return: always 0; the int return mirrors the firmware-backed path's
 * signature so callers can treat both uniformly.
 */
static int zynqmp_mmio_rawread(const u32 address, u32 *value)
{
	*value = readl((ulong)address);
	return 0;
}
178
/*
 * zynqmp_mmio_write() - Masked MMIO write, routed by execution context.
 * @address: register address
 * @mask: bits to modify
 * @value: new bit values; only bits within @mask are applied
 *
 * In SPL or at EL3 the write is performed directly; otherwise (with
 * CONFIG_ZYNQMP_FIRMWARE) it is forwarded to the PMU firmware via
 * PM_MMIO_WRITE. The trailing -EINVAL is only reachable when no
 * firmware path is compiled in and the direct path does not apply.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int zynqmp_mmio_write(const u32 address,
		      const u32 mask,
		      const u32 value)
{
	if (IS_ENABLED(CONFIG_SPL_BUILD) || current_el() == 3)
		return zynqmp_mmio_rawwrite(address, mask, value);
#if defined(CONFIG_ZYNQMP_FIRMWARE)
	else
		return xilinx_pm_request(PM_MMIO_WRITE, address, mask,
					 value, 0, NULL);
#endif

	return -EINVAL;
}
193
/*
 * zynqmp_mmio_read() - MMIO read, routed by execution context.
 * @address: register address
 * @value: output for the value read; must be non-NULL
 *
 * In SPL or at EL3 the register is read directly; otherwise (with
 * CONFIG_ZYNQMP_FIRMWARE) the read goes through the PMU firmware via
 * PM_MMIO_READ and the result is taken from the reply payload.
 *
 * Return: 0 on success, negative error code on failure (-EINVAL for a
 * NULL @value or when no access path is available).
 */
int zynqmp_mmio_read(const u32 address, u32 *value)
{
	/* Error codes are negative — keep them in a signed variable,
	 * matching the int return type (was u32, relying on implicit
	 * unsigned->signed conversion).
	 */
	int ret = -EINVAL;

	if (!value)
		return ret;

	if (IS_ENABLED(CONFIG_SPL_BUILD) || current_el() == 3) {
		ret = zynqmp_mmio_rawread(address, value);
	}
#if defined(CONFIG_ZYNQMP_FIRMWARE)
	else {
		u32 ret_payload[PAYLOAD_ARG_CNT];

		ret = xilinx_pm_request(PM_MMIO_READ, address, 0, 0,
					0, ret_payload);
		*value = ret_payload[1];
	}
#endif

	return ret;
}
216