• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
26 
27 #include <xf86drm.h>
28 
29 #include "drm-uapi/panfrost_drm.h"
30 #include "util/hash_table.h"
31 #include "util/macros.h"
32 #include "util/u_math.h"
33 #include "util/u_thread.h"
34 #include "pan_bo.h"
35 #include "pan_device.h"
36 #include "pan_encoder.h"
37 #include "pan_samples.h"
38 #include "pan_texture.h"
39 #include "pan_util.h"
40 #include "wrap.h"
41 
/* DRM_PANFROST_PARAM_TEXTURE_FEATURES0 will return a bitmask of supported
 * compressed formats, so we offer a helper to test if a format is supported */
44 
45 bool
panfrost_supports_compressed_format(struct panfrost_device * dev,unsigned texfeat_bit)46 panfrost_supports_compressed_format(struct panfrost_device *dev,
47                                     unsigned texfeat_bit)
48 {
49    assert(texfeat_bit < 32);
50    return dev->compressed_formats & BITFIELD_BIT(texfeat_bit);
51 }
52 
/* Open and initialize a panfrost_device on top of an existing DRM fd.
 *
 * memctx is stored on the device for later allocations (NOTE(review):
 * memctx ownership/lifetime semantics are not visible in this file —
 * confirm against pan_device.h).
 *
 * The fd is handed to the kmod layer with PAN_KMOD_DEV_FLAG_OWNS_FD; if
 * device creation itself fails we close the fd here, since nothing else
 * owns it yet.
 *
 * Returns 0 on success, -1 on failure. On failure everything created so
 * far is torn down and dev->kmod.dev is cleared. NOTE(review): the error
 * path unreferences tiler_heap/sample_positions unconditionally, and the
 * VM/decode-ctx checks rely on those fields being NULL when not yet
 * created — this assumes *dev was zero-initialized by the caller and that
 * panfrost_bo_unreference tolerates NULL; confirm both.
 */
int
panfrost_open_device(void *memctx, int fd, struct panfrost_device *dev)
{
   dev->memctx = memctx;

   dev->kmod.dev = pan_kmod_dev_create(fd, PAN_KMOD_DEV_FLAG_OWNS_FD, NULL);
   if (!dev->kmod.dev) {
      /* kmod never took ownership of the fd, so we must close it */
      close(fd);
      return -1;
   }

   /* Cache the GPU properties once; everything below keys off them */
   pan_kmod_dev_query_props(dev->kmod.dev, &dev->kmod.props);

   dev->arch = pan_arch(dev->kmod.props.gpu_prod_id);
   dev->model = panfrost_get_model(dev->kmod.props.gpu_prod_id,
                                   dev->kmod.props.gpu_variant);

   /* If we don't recognize the model, bail early */
   if (!dev->model)
      goto err_free_kmod_dev;

   /* 48bit address space max, with the lower 32MB reserved. We clamp
    * things so it matches kmod VA range limitations.
    */
   uint64_t user_va_start =
      panfrost_clamp_to_usable_va_range(dev->kmod.dev, PAN_VA_USER_START);
   uint64_t user_va_end =
      panfrost_clamp_to_usable_va_range(dev->kmod.dev, PAN_VA_USER_END);

   dev->kmod.vm = pan_kmod_vm_create(
      dev->kmod.dev, PAN_KMOD_VM_FLAG_AUTO_VA | PAN_KMOD_VM_FLAG_TRACK_ACTIVITY,
      user_va_start, user_va_end - user_va_start);
   if (!dev->kmod.vm)
      goto err_free_kmod_dev;

   /* Derive per-device capabilities and format tables from the queried
    * properties and the architecture generation. */
   dev->core_count =
      panfrost_query_core_count(&dev->kmod.props, &dev->core_id_range);
   dev->thread_tls_alloc = panfrost_query_thread_tls_alloc(&dev->kmod.props);
   dev->optimal_tib_size = panfrost_query_optimal_tib_size(dev->model);
   dev->compressed_formats =
      panfrost_query_compressed_formats(&dev->kmod.props);
   dev->tiler_features = panfrost_query_tiler_features(&dev->kmod.props);
   dev->has_afbc = panfrost_query_afbc(&dev->kmod.props);
   dev->has_afrc = panfrost_query_afrc(&dev->kmod.props);
   dev->formats = panfrost_format_table(dev->arch);
   dev->blendable_formats = panfrost_blendable_format_table(dev->arch);

   /* Sparse map from GEM handle to panfrost_bo, grown in 512-entry nodes */
   util_sparse_array_init(&dev->bo_map, sizeof(struct panfrost_bo), 512);

   pthread_mutex_init(&dev->bo_cache.lock, NULL);
   list_inithead(&dev->bo_cache.lru);

   for (unsigned i = 0; i < ARRAY_SIZE(dev->bo_cache.buckets); ++i)
      list_inithead(&dev->bo_cache.buckets[i]);

   /* Initialize pandecode before we start allocating.
    * NOTE(review): dev->debug is read here but never written in this
    * function — presumably the caller sets it before opening; verify. */
   if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
      dev->decode_ctx = pandecode_create_context(!(dev->debug & PAN_DBG_TRACE));

   /* Tiler heap is internally required by the tiler, which can only be
    * active for a single job chain at once, so a single heap can be
    * shared across batches/contexts.
    *
    * Heap management is completely different on CSF HW, don't allocate the
    * heap BO in that case.
    */

   if (dev->arch < 10) {
      dev->tiler_heap = panfrost_bo_create(
         dev, 128 * 1024 * 1024, PAN_BO_INVISIBLE | PAN_BO_GROWABLE, "Tiler heap");
      if (!dev->tiler_heap)
         goto err_free_kmod_dev;
   }

   pthread_mutex_init(&dev->submit_lock, NULL);

   /* Done once on init */
   dev->sample_positions = panfrost_bo_create(
      dev, panfrost_sample_positions_buffer_size(), 0, "Sample positions");
   if (!dev->sample_positions)
      goto err_free_kmod_dev;

   panfrost_upload_sample_positions(dev->sample_positions->ptr.cpu);
   return 0;

err_free_kmod_dev:
   /* Unwind in roughly reverse creation order; fields not yet created are
    * expected to still be NULL (see note in the header comment above). */
   if (dev->decode_ctx)
      pandecode_destroy_context(dev->decode_ctx);

   panfrost_bo_unreference(dev->tiler_heap);
   panfrost_bo_unreference(dev->sample_positions);

   if (dev->kmod.vm)
      pan_kmod_vm_destroy(dev->kmod.vm);

   pan_kmod_dev_destroy(dev->kmod.dev);
   dev->kmod.dev = NULL;
   return -1;
}
152 
/* Tear down a device previously initialized with panfrost_open_device().
 *
 * Safe to call after a failed open: the kmod.dev/kmod.vm pointers are
 * checked, and open clears kmod.dev on its own failure path. Destruction
 * runs in roughly reverse order of creation: BOs and the BO cache go
 * before the VM, the VM before the kmod device.
 *
 * NOTE(review): panfrost_bo_unreference is called without NULL checks —
 * on pre-CSF failure-free opens tiler_heap is set, but on arch >= 10 it
 * stays NULL; assumes the helper tolerates NULL — confirm in pan_bo.c.
 */
void
panfrost_close_device(struct panfrost_device *dev)
{
   /* If we don't recognize the model, the rest of the device won't exist,
    * we will have early-exited the device open.
    */
   if (dev->model) {
      pthread_mutex_destroy(&dev->submit_lock);
      panfrost_bo_unreference(dev->tiler_heap);
      panfrost_bo_unreference(dev->sample_positions);
      /* Flush every cached BO before killing the cache lock */
      panfrost_bo_cache_evict_all(dev);
      pthread_mutex_destroy(&dev->bo_cache.lock);
      util_sparse_array_finish(&dev->bo_map);
   }

   if (dev->kmod.vm)
      pan_kmod_vm_destroy(dev->kmod.vm);

   if (dev->kmod.dev)
      pan_kmod_dev_destroy(dev->kmod.dev);
}
174