/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_gem_tiling.h"
#include "i915_scatterlist.h"

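/*
 * Replace the discontiguous set of shmem pages backing @obj with a single
 * physically contiguous, DMA-coherent allocation, copying across any
 * existing contents. The new backing store is described by a one-entry
 * scatterlist covering the whole block.
 */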
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(obj->base.dev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

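	/*
	 * The CPU virtual address is stashed where a struct page pointer
	 * would normally live; i915_gem_object_put_pages_phys() recovers
	 * it again via sg_page(). The object is no longer backed by real
	 * struct pages after this point.
	 */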
	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

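	/*
	 * Copy the current shmem contents into the contiguous block,
	 * flushing the CPU caches page by page so that the data is
	 * visible through the new backing store.
	 */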
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_st;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

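	/* Flush the chipset write buffers so the copy is visible to the GPU */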
	intel_gt_chipset_flush(to_gt(i915));

	/* We're no longer struct page backed */
	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_st:
	kfree(st);
err_pci:
	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

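/*
 * Release the contiguous backing store, first writing any dirty contents
 * back into the object's shmem pages so that they survive (and may later
 * be swapped out) once the DMA-coherent buffer is freed.
 */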
void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

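	/*
	 * If the object was written through the phys backing store, copy
	 * each page back into the shmem filp before the buffer disappears.
	 */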
	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

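/*
 * pwrite backend for phys objects: after waiting for any outstanding GPU
 * activity, copy the user data straight into the contiguous kernel mapping
 * and flush it out of the CPU caches for the GPU to see.
 */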
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(to_gt(i915));

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}

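/*
 * pread backend for phys objects: wait for GPU writes to complete, then
 * clflush the range so the copy out to userspace sees the latest data.
 */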
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	drm_clflush_virt_range(vaddr, args->size);
	if (copy_to_user(user_data, vaddr, args->size))
		return -EFAULT;

	return 0;
}

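/*
 * Swap the object's backing store from shmem pages over to the contiguous
 * phys allocation, restoring the original pages if the conversion fails.
 */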
static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	pages = __i915_gem_object_unset_pages(obj);

	err = i915_gem_object_get_pages_phys(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_put_pages_shmem(obj, pages);

	i915_gem_object_release_memory_region(obj);
	return 0;

err_xfer:
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
	return err;
}

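/*
 * Convert a shmem object into a physically contiguous one, as needed by
 * old hardware that reads from a raw physical address (such as the legacy
 * cursor and overlay paths). The conversion is one-way and is only
 * permitted while the object is entirely idle and unused: not bound,
 * not pinned, not mapped and not marked purgeable.
 */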
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	int err;

	assert_object_held(obj);

	if (align > obj->base.size)
		return -EINVAL;

	if (!i915_gem_object_is_shmem(obj))
		return -EINVAL;

	if (!i915_gem_object_has_struct_page(obj))
		return 0;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (i915_gem_object_has_tiling_quirk(obj))
		return -EFAULT;

	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(obj->base.dev,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	return i915_gem_object_shmem_to_phys(obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif