• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2019 Collabora, Ltd.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
25  */
26 
27 #include "util/u_math.h"
28 #include "util/macros.h"
29 #include "pan_encoder.h"
30 
31 /* Midgard has a small register file, so shaders with high register pressure
32  * need to spill from the register file onto the stack. In addition to
33  * spilling, it is desirable to allocate temporary arrays on the stack (for
34  * instance because the register file does not support indirect access but the
35  * stack does).
36  *
37  * The stack is located in "Thread Local Storage", sometimes abbreviated TLS in
38  * the kernel source code. Thread local storage is allocated per-thread,
39  * per-core, so threads executing concurrently do not interfere with each
40  * other's stacks. On modern kernels, we may query
41  * DRM_PANFROST_PARAM_THREAD_TLS_ALLOC for the number of threads per core we
42  * must allocate for, and DRM_PANFROST_PARAM_SHADER_PRESENT for a bitmask of
43  * shader cores (so take a popcount of that mask for the number of shader
44  * cores). On older kernels that do not support querying these values,
45  * following kbase, we may use the worst-case value of 256 threads for
46  * THREAD_TLS_ALLOC, and the worst-case value of 16 cores for Midgard per the
47  * "shader core count" column of the implementations table in
48  * https://en.wikipedia.org/wiki/Mali_%28GPU%29 [citation needed]
49  *
50  * Within a particular thread, there is stack allocated. If it is present, its
51  * size is a power-of-two, and it is at least 16 bytes. Stack is allocated
52  * with the shared memory descriptor used for all shaders within a frame (note
53  * that they don't execute concurrently so it's fine). So, consider the maximum
54  * stack size used by any shader within a job, and then compute (where npot
55  * denotes the next power of two):
56  *
57  *      bytes/thread = npot(max(size, 16))
58  *      allocated = (# of bytes/thread) * (# of threads/core) * (# of cores)
59  *
60  * The size of Thread Local Storage is signaled to the GPU in the tls_size
61  * field, which has a log2 modifier and is in units of 16 bytes.
62  */
63 
/* Computes log_stack_size = log2(ceil(s / 16)): the log2 modifier for the
 * tls_size field, which is expressed in units of 16 bytes. A zero-sized
 * stack yields a shift of zero (no TLS needed). */

unsigned
panfrost_get_stack_shift(unsigned stack_size)
{
        return stack_size ?
                util_logbase2_ceil(DIV_ROUND_UP(stack_size, 16)) : 0;
}
74 
75 /* Computes the total stack allocation given the per-thread stack size, the
76 
unsigned
panfrost_get_total_stack_size(
                unsigned thread_size,
                unsigned threads_per_core,
                unsigned core_count)
{
        /* No per-thread stack means no TLS allocation at all. */
        if (thread_size == 0)
                return 0;

        /* Per the comment at the top of the file: pad the per-thread size to
         * a minimum of 16 bytes and round up to the next power of two. */
        unsigned bytes_per_thread =
                util_next_power_of_two(ALIGN_POT(thread_size, 16));

        /* TLS is allocated per-thread, per-core. */
        return bytes_per_thread * threads_per_core * core_count;
}
88