// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2020 Rockchip Electronics Co., Ltd. */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include "rkisp_tb_helper.h"

static struct platform_device *rkisp_tb_pdev;

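/*
 * Backing pages of one exported shared-memory dma-buf. The pages are not
 * allocated here; they describe an already-reserved, physically contiguous
 * region (see shm_alloc()).
 */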
struct shm_data {
	int npages;
	struct page *pages[];
};

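/*
 * Build a scatterlist over the shared-memory pages and map it for the
 * attached device. CPU cache maintenance is skipped here; the region is
 * synced explicitly in the begin/end_cpu_access callbacks.
 */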
static struct sg_table *shm_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction dir)
{
	struct shm_data *data = attachment->dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(table, data->npages, GFP_KERNEL)) {
		kfree(table);
		return ERR_PTR(-ENOMEM);
	}

	sg = table->sgl;
	for (i = 0; i < data->npages; i++) {
		sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

	if (!dma_map_sg_attrs(attachment->dev, table->sgl, table->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC)) {
		sg_free_table(table);
		kfree(table);
		return ERR_PTR(-ENOMEM);
	}

	/* Cache the mapping so the cpu_access callbacks can reach it. */
	attachment->priv = table;

	return table;
}

static void shm_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, dir);
	sg_free_table(table);
	kfree(table);
}

static void shm_release(struct dma_buf *dma_buf)
{
	struct shm_data *data = dma_buf->priv;

	kfree(data);
}

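/* Kernel virtual mapping helpers backing dma_buf_vmap()/dma_buf_vunmap(). */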
static void *shm_vmap(struct dma_buf *dma_buf)
{
	struct shm_data *data = dma_buf->priv;

	return vm_map_ram(data->pages, data->npages, 0);
}

static void shm_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct shm_data *data = dma_buf->priv;

	vm_unmap_ram(vaddr, data->npages);
}

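/* Map the shared-memory pages into the importer's VMA one page at a time. */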
static int shm_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct shm_data *data = dma_buf->priv;
	unsigned long vm_start = vma->vm_start;
	int i, ret;

	for (i = 0; i < data->npages; i++) {
		ret = remap_pfn_range(vma, vm_start, page_to_pfn(data->pages[i]),
				      PAGE_SIZE, vma->vm_page_prot);
		if (ret)
			return ret;
		vm_start += PAGE_SIZE;
	}

	return 0;
}

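/*
 * The sg mapping is created with DMA_ATTR_SKIP_CPU_SYNC, so CPU cache
 * maintenance is done here, on the first attachment's mapping, when the
 * importer brackets CPU access with dma_buf_begin/end_cpu_access().
 */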
static int shm_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
{
	struct dma_buf_attachment *attachment;
	struct sg_table *table;

	if (list_empty(&dmabuf->attachments))
		return 0;

	attachment = list_first_entry(&dmabuf->attachments, struct dma_buf_attachment, node);
	table = attachment->priv;
	if (!table)
		return 0;

	dma_sync_sg_for_cpu(attachment->dev, table->sgl, table->nents, dir);

	return 0;
}

static int shm_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
{
	struct dma_buf_attachment *attachment;
	struct sg_table *table;

	if (list_empty(&dmabuf->attachments))
		return 0;

	attachment = list_first_entry(&dmabuf->attachments, struct dma_buf_attachment, node);
	table = attachment->priv;
	if (!table)
		return 0;

	dma_sync_sg_for_device(attachment->dev, table->sgl, table->nents, dir);

	return 0;
}

static const struct dma_buf_ops shm_dmabuf_ops = {
	.map_dma_buf = shm_map_dma_buf,
	.unmap_dma_buf = shm_unmap_dma_buf,
	.release = shm_release,
	.mmap = shm_mmap,
	.vmap = shm_vmap,
	.vunmap = shm_vunmap,
	.begin_cpu_access = shm_begin_cpu_access,
	.end_cpu_access = shm_end_cpu_access,
};

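/*
 * Export the physically contiguous region described by @shmem
 * (shm_start/shm_size) as a dma-buf. The pages are only referenced here,
 * never allocated or freed by this helper.
 */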
static struct dma_buf *shm_alloc(struct rkisp_thunderboot_shmem *shmem)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	struct shm_data *data;
	int i, npages;

	npages = PAGE_ALIGN(shmem->shm_size) / PAGE_SIZE;
	data = kmalloc(struct_size(data, pages, npages), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	data->npages = npages;
	for (i = 0; i < npages; i++)
		data->pages[i] = phys_to_page(shmem->shm_start + i * PAGE_SIZE);

	exp_info.ops = &shm_dmabuf_ops;
	exp_info.size = npages * PAGE_SIZE;
	exp_info.flags = O_RDWR;
	exp_info.priv = data;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		kfree(data);

	return dmabuf;
}

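/*
 * Hold a runtime PM reference on the helper device from probe onwards so the
 * clocks and power domain it references stay enabled until
 * rkisp_tb_unprotect_clk() drops the reference again.
 */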
static int __maybe_unused rkisp_tb_clocks_loader_protect(void)
{
	if (rkisp_tb_pdev) {
		pm_runtime_enable(&rkisp_tb_pdev->dev);
		pm_runtime_get_sync(&rkisp_tb_pdev->dev);
	}
	return 0;
}

static int __maybe_unused rkisp_tb_clocks_loader_unprotect(void)
{
	if (rkisp_tb_pdev) {
		pm_runtime_put_sync(&rkisp_tb_pdev->dev);
		pm_runtime_disable(&rkisp_tb_pdev->dev);
	}
	return 0;
}

static int __maybe_unused rkisp_tb_runtime_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused rkisp_tb_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops rkisp_tb_plat_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rkisp_tb_runtime_suspend,
			   rkisp_tb_runtime_resume, NULL)
};

static const struct of_device_id rkisp_tb_plat_of_match[] = {
	{
		.compatible = "rockchip,thunder-boot-rkisp",
	},
	{},
};

static int rkisp_tb_plat_probe(struct platform_device *pdev)
{
	rkisp_tb_pdev = pdev;
	rkisp_tb_clocks_loader_protect();
	return 0;
}

static int rkisp_tb_plat_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver __maybe_unused rkisp_tb_plat_drv = {
	.driver = {
		.name = "rkisp_thunderboot",
		.of_match_table = of_match_ptr(rkisp_tb_plat_of_match),
		.pm = &rkisp_tb_plat_pm_ops,
	},
	.probe = rkisp_tb_plat_probe,
	.remove = rkisp_tb_plat_remove,
};

static int __init rkisp_tb_plat_drv_init(void)
{
	return platform_driver_register(&rkisp_tb_plat_drv);
}

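/*
 * Register at arch_initcall_sync time so the probe, and with it the runtime
 * PM reference taken in rkisp_tb_clocks_loader_protect(), happens early in
 * boot.
 */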
arch_initcall_sync(rkisp_tb_plat_drv_init);

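/*
 * Export the shared-memory region described by @shmem as a dma-buf and hand
 * the resulting file descriptor back through shmem->shm_fd.
 */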
long rkisp_tb_shm_ioctl(struct rkisp_thunderboot_shmem *shmem)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = shm_alloc(shmem);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0) {
		dma_buf_put(dmabuf);
		return fd;
	}

	shmem->shm_fd = fd;

	return 0;
}

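/* Drop the boot-time runtime PM reference taken when the helper probed. */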
void rkisp_tb_unprotect_clk(void)
{
	rkisp_tb_clocks_loader_unprotect();
}
EXPORT_SYMBOL(rkisp_tb_unprotect_clk);

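/* Last thunderboot state reported through rkisp_tb_set_state(). */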
static enum rkisp_tb_state tb_state;

void rkisp_tb_set_state(enum rkisp_tb_state result)
{
	tb_state = result;
}
EXPORT_SYMBOL(rkisp_tb_set_state);

enum rkisp_tb_state rkisp_tb_get_state(void)
{
	return tb_state;
}
EXPORT_SYMBOL(rkisp_tb_get_state);