/*
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#include <xf86drm.h>

#include "drm-uapi/panfrost_drm.h"
#include "util/hash_table.h"
#include "util/macros.h"
#include "util/u_math.h"
#include "util/u_thread.h"
#include "pan_bo.h"
#include "pan_device.h"
#include "pan_encoder.h"
#include "pan_samples.h"
#include "pan_texture.h"
#include "pan_util.h"
#include "wrap.h"

/* DRM_PANFROST_PARAM_TEXTURE_FEATURES0 will return a bitmask of supported
 * compressed formats, so we offer a helper to test if a format is supported */

bool
panfrost_supports_compressed_format(struct panfrost_device *dev, unsigned fmt)
{
   if (MALI_EXTRACT_TYPE(fmt) != MALI_FORMAT_COMPRESSED)
      return true;

   unsigned idx = fmt & ~MALI_FORMAT_COMPRESSED;
   assert(idx < 32);

   return panfrost_query_compressed_formats(&dev->kmod.props) & (1 << idx);
}
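
/* For example, a hypothetical caller gating an ASTC format on hardware
 * support might do (MALI_ASTC_2D_LDR is used here purely for illustration):
 *
 *    if (!panfrost_supports_compressed_format(dev, MALI_ASTC_2D_LDR))
 *       return false;
 */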

/* Always reserve the lower 32MB. */
#define PANFROST_VA_RESERVE_BOTTOM 0x2000000ull
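/* Presumably this keeps NULL and near-NULL GPU pointers unmapped so they
 * fault instead of aliasing real allocations (an inference on our part;
 * the kernel may enforce its own floor as well). */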

void
panfrost_open_device(void *memctx, int fd, struct panfrost_device *dev)
{
   dev->memctx = memctx;

   dev->kmod.dev = pan_kmod_dev_create(fd, PAN_KMOD_DEV_FLAG_OWNS_FD, NULL);
   if (!dev->kmod.dev) {
      close(fd);
      return;
   }

   pan_kmod_dev_query_props(dev->kmod.dev, &dev->kmod.props);

   dev->arch = pan_arch(dev->kmod.props.gpu_prod_id);
   dev->model = panfrost_get_model(dev->kmod.props.gpu_prod_id);

   /* If we don't recognize the model, bail early */
   if (!dev->model)
      goto err_free_kmod_dev;

   /* 32-bit address space, with the lower 32MB reserved. We clamp
    * things so it matches kmod VA range limitations.
    */
   uint64_t user_va_start = panfrost_clamp_to_usable_va_range(
      dev->kmod.dev, PANFROST_VA_RESERVE_BOTTOM);
   uint64_t user_va_end =
      panfrost_clamp_to_usable_va_range(dev->kmod.dev, 1ull << 32);
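
   /* A worked example of the bounds, assuming the kernel imposes no extra
    * VA restrictions (the clamp helper would otherwise narrow them):
    *
    *    user_va_start = 0x02000000  (32 MiB)
    *    user_va_end   = 0x100000000 (4 GiB)
    *
    * giving pan_kmod_vm_create() an AUTO_VA window of 4 GiB - 32 MiB.
    */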

   dev->kmod.vm =
      pan_kmod_vm_create(dev->kmod.dev, PAN_KMOD_VM_FLAG_AUTO_VA, user_va_start,
                         user_va_end - user_va_start);
   if (!dev->kmod.vm)
      goto err_free_kmod_dev;

   dev->core_count =
      panfrost_query_core_count(&dev->kmod.props, &dev->core_id_range);
   dev->thread_tls_alloc = panfrost_query_thread_tls_alloc(&dev->kmod.props);
   dev->optimal_tib_size = panfrost_query_optimal_tib_size(dev->model);
   dev->compressed_formats =
      panfrost_query_compressed_formats(&dev->kmod.props);
   dev->tiler_features = panfrost_query_tiler_features(&dev->kmod.props);
   dev->has_afbc = panfrost_query_afbc(&dev->kmod.props);
   dev->formats = panfrost_format_table(dev->arch);
   dev->blendable_formats = panfrost_blendable_format_table(dev->arch);

   util_sparse_array_init(&dev->bo_map, sizeof(struct panfrost_bo), 512);
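
   /* dev->bo_map is assumed to be indexed by GEM handle, so BO lookups from
    * kernel handles are O(1); 512 is the sparse-array node size. */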

   pthread_mutex_init(&dev->bo_cache.lock, NULL);
   list_inithead(&dev->bo_cache.lru);

   for (unsigned i = 0; i < ARRAY_SIZE(dev->bo_cache.buckets); ++i)
      list_inithead(&dev->bo_cache.buckets[i]);

   /* Initialize pandecode before we start allocating */
   if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
      dev->decode_ctx = pandecode_create_context(!(dev->debug & PAN_DBG_TRACE));
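   /* The boolean is assumed to select stderr output: decode to stderr
    * unless PAN_DBG_TRACE requests a full dump file. */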

   /* The tiler heap is internally required by the tiler, which can only be
    * active for a single job chain at once, so a single heap can be
    * shared across batches/contexts */

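   /* A sketch of what the flags are taken to mean here (inferred from their
    * names): PAN_BO_INVISIBLE requests a BO with no CPU mapping, and
    * PAN_BO_GROWABLE requests on-demand backing pages rather than committing
    * the full 128 MiB up front. */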
   dev->tiler_heap = panfrost_bo_create(
      dev, 128 * 1024 * 1024, PAN_BO_INVISIBLE | PAN_BO_GROWABLE, "Tiler heap");

   pthread_mutex_init(&dev->submit_lock, NULL);

   /* Done once on init */
   dev->sample_positions = panfrost_bo_create(
      dev, panfrost_sample_positions_buffer_size(), 0, "Sample positions");
   panfrost_upload_sample_positions(dev->sample_positions->ptr.cpu);
   return;

err_free_kmod_dev:
   pan_kmod_dev_destroy(dev->kmod.dev);
   dev->kmod.dev = NULL;
}
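
/* A hypothetical caller would pair the two entry points like so (a sketch:
 * on failure, dev.kmod.dev is left NULL and the fd has been consumed either
 * way, since PAN_KMOD_DEV_FLAG_OWNS_FD hands fd ownership to the kmod dev):
 *
 *    struct panfrost_device dev = {0};
 *    panfrost_open_device(memctx, fd, &dev);
 *    if (!dev.kmod.dev)
 *       return; // open failed
 *    ...
 *    panfrost_close_device(&dev);
 */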

void
panfrost_close_device(struct panfrost_device *dev)
{
   /* If we don't recognize the model, the rest of the device will not have
    * been initialized, since panfrost_open_device() bails out early in that
    * case.
    */
   if (dev->model) {
      pthread_mutex_destroy(&dev->submit_lock);
      panfrost_bo_unreference(dev->tiler_heap);
      panfrost_bo_unreference(dev->sample_positions);
      panfrost_bo_cache_evict_all(dev);
      pthread_mutex_destroy(&dev->bo_cache.lock);
      util_sparse_array_finish(&dev->bo_map);
   }

   if (dev->kmod.vm)
      pan_kmod_vm_destroy(dev->kmod.vm);

   if (dev->kmod.dev)
      pan_kmod_dev_destroy(dev->kmod.dev);
}