# Copyright 2022 Alyssa Rosenzweig
# Copyright 2021 Collabora, Ltd.
# Copyright 2016 Intel Corporation
# SPDX-License-Identifier: MIT

import argparse
import sys
import math

a = 'a'
b = 'b'
c = 'c'
d = 'd'
e = 'e'

lower_sm5_shift = []

# Our shifts differ from SM5 for the upper bits. Mask to match the NIR
# behaviour. Because this happens as a late lowering, NIR won't optimize the
# masking back out (that happens in the main nir_opt_algebraic).
for s in [8, 16, 32, 64]:
    for shift in ["ishl", "ishr", "ushr"]:
        lower_sm5_shift += [((shift, f'a@{s}', b),
                             (shift, a, ('iand', b, s - 1)))]
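
# For example, the 32-bit iteration above generates
#   (('ishl', 'a@32', b), ('ishl', a, ('iand', b, 31)))
# so a shift count of 33 behaves like a shift by 1, matching NIR's semantics.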

lower_pack = [
    (('pack_half_2x16_split', a, b),
     ('pack_32_2x16_split', ('f2f16', a), ('f2f16', b))),

    # We don't have 8-bit ALU, so we need to lower this. But if we lower it like
    # this, we can at least coalesce the pack_32_2x16_split and only pay the
    # cost of the iors and ishl. (u2u16 of 8-bit is assumed free.)
    (('pack_32_4x8_split', a, b, c, d),
     ('pack_32_2x16_split', ('ior', ('u2u16', a), ('ishl', ('u2u16', b), 8)),
                            ('ior', ('u2u16', c), ('ishl', ('u2u16', d), 8)))),
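    # Assuming pack_32_2x16_split puts its first source in the low half, the
    # word built above is a | (b << 8) | (c << 16) | (d << 24), i.e. a lands
    # in the least significant byte.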

    (('unpack_half_2x16_split_x', a), ('f2f32', ('unpack_32_2x16_split_x', a))),
    (('unpack_half_2x16_split_y', a), ('f2f32', ('unpack_32_2x16_split_y', a))),

    (('extract_u16', 'a@32', 0), ('u2u32', ('unpack_32_2x16_split_x', a))),
    (('extract_u16', 'a@32', 1), ('u2u32', ('unpack_32_2x16_split_y', a))),
    (('extract_i16', 'a@32', 0), ('i2i32', ('unpack_32_2x16_split_x', a))),
    (('extract_i16', 'a@32', 1), ('i2i32', ('unpack_32_2x16_split_y', a))),

    # For optimizing extract->convert sequences for unpack/pack norm
    (('u2f32', ('u2u32', a)), ('u2f32', a)),
    (('i2f32', ('i2i32', a)), ('i2f32', a)),

    # Chew through some 8-bit before the backend has to deal with it
    (('f2u8', a), ('u2u8', ('f2u16', a))),
    (('f2i8', a), ('i2i8', ('f2i16', a))),

    # Based on the VIR lowering
    (('f2f16_rtz', 'a@32'),
     ('bcsel', ('flt', ('fabs', a), ('fabs', ('f2f32', ('f2f16_rtne', a)))),
      ('isub', ('f2f16_rtne', a), 1), ('f2f16_rtne', a))),
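    # That is: take the RTNE result, and if its magnitude exceeds |a|, RTNE
    # rounded away from zero, so step one ULP back toward zero by subtracting
    # 1 from the raw f16 bit pattern (this shrinks the magnitude for either
    # sign, since the sign bit is untouched).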

    # These are based on the lowerings from nir_opt_algebraic, but conditioned
    # on the number of bits not being constant. If the bit count is constant
    # (the happy path) we can use our native instruction instead.
    (('ibitfield_extract', 'value', 'offset', 'bits(is_not_const)'),
     ('bcsel', ('ieq', 0, 'bits'),
      0,
      ('ishr',
       ('ishl', 'value', ('isub', ('isub', 32, 'bits'), 'offset')),
       ('isub', 32, 'bits')))),

    (('ubitfield_extract', 'value', 'offset', 'bits(is_not_const)'),
     ('iand',
      ('ushr', 'value', 'offset'),
      ('bcsel', ('ieq', 'bits', 32),
       0xffffffff,
       ('isub', ('ishl', 1, 'bits'), 1)))),
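    # For example, bits = 4 yields the mask (1 << 4) - 1 = 0xf. The bcsel on
    # bits == 32 sidesteps computing 1 << 32, which would be out of range for
    # a 32-bit ishl.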

    # Codegen depends on this trivial case being optimized out.
    (('ubitfield_extract', 'value', 'offset', 0), 0),
    (('ibitfield_extract', 'value', 'offset', 0), 0),

    # At this point, bitfield extracts are constant. We can only do constant
    # unsigned bitfield extract, so lower signed to unsigned + sign extend.
    (('ibitfield_extract', a, b, '#bits'),
     ('ishr', ('ishl', ('ubitfield_extract', a, b, 'bits'), ('isub', 32, 'bits')),
      ('isub', 32, 'bits'))),
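    # E.g. with bits = 5, the field is shifted left by 27 into the top of the
    # word, then the arithmetic shift right by 27 replicates the field's sign
    # bit across the upper 27 bits.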
]

# Rewriting bcsel(a || b, ...) in terms of bcsel(a, ...) and bcsel(b, ...) lets
# our compare-and-select fusion rules do a better job, assuming that a and b
# are comparisons themselves.
lower_selects = [
    (('bcsel', ('ior(is_used_once)', a, b), c, d),
     ('bcsel', a, c, ('bcsel', b, c, d))),

    (('bcsel', ('iand(is_used_once)', a, b), c, d),
     ('bcsel', a, ('bcsel', b, c, d), d)),
]
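
# Sanity check for the ior rule: if a is true, both sides produce c; if a is
# false, the rewrite reduces to bcsel(b, c, d), exactly what (a || b) ? c : d
# requires. The iand rule is the dual, nesting the select on the true arm.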

for T, sizes, one in [('f', [16, 32], 1.0),
                      ('i', [8, 16, 32], 1),
                      ('b', [32], -1)]:
    for size in sizes:
        lower_selects.extend([
            ((f'b2{T}{size}', ('inot', 'a@1')), ('bcsel', a, 0, one)),
            ((f'b2{T}{size}', 'a@1'), ('bcsel', a, one, 0)),
        ])
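
# As a concrete instance, the ('f', [16, 32], 1.0) row generates
#   (('b2f32', 'a@1'), ('bcsel', a, 1.0, 0))
# turning boolean-to-float conversions into selects.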

fuse_extr = []
for start in range(32):
    fuse_extr.extend([
        (('ior', ('ushr', 'a@32', start), ('ishl', 'b@32', 32 - start)),
         ('extr_agx', a, b, start, 0)),
    ])
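
# Judging by the pattern, extr_agx(a, b, start, 0) extracts 32 bits starting
# at bit `start` of the 64-bit concatenation (b << 32) | a: the low bits come
# from a >> start and the high bits from b << (32 - start), a funnel shift.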

fuse_ubfe = []
for bits in range(1, 32):
    fuse_ubfe.extend([
        (('iand', ('ushr', 'a@32', b), (1 << bits) - 1),
         ('ubitfield_extract', a, b, bits))
    ])
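
# For bits = 8 this matches (a >> b) & 0xff; the loop enumerates exactly the
# contiguous all-ones masks (1 << bits) - 1 for widths 1 through 31.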

# (x * y) + z = (x * y) + (z << 0)
def imad(x, y, z):
    return ('imadshl_agx', x, y, z, 0)

# (x * y) - z = (x * y) - (z << 0)
def imsub(x, y, z):
    return ('imsubshl_agx', x, y, z, 0)

# x + (y << s) = (x * 1) + (y << s)
def iaddshl(x, y, s):
    return ('imadshl_agx', x, 1, y, s)

# x - (y << s) = (x * 1) - (y << s)
def isubshl(x, y, s):
    return ('imsubshl_agx', x, 1, y, s)

fuse_imad = [
    # Reassociate imul+iadd chain in order to fuse imads. This pattern comes up
    # in compute shader lowering.
    (('iadd', ('iadd(is_used_once)', ('imul(is_used_once)', a, b),
              ('imul(is_used_once)', c, d)), e),
     imad(a, b, imad(c, d, e))),

    # Fuse regular imad
    (('iadd', ('imul(is_used_once)', a, b), c), imad(a, b, c)),
    (('isub', ('imul(is_used_once)', a, b), c), imsub(a, b, c)),
]
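
# The reassociation rule rewrites ((a * b) + (c * d)) + e as
# a * b + (c * d + e), so both multiplies sit directly under an add and can
# each fuse into an imadshl_agx, instead of leaving a stray iadd.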

for s in range(1, 5):
    fuse_imad += [
        # Definitions
        (('iadd', a, ('ishl(is_used_once)', b, s)), iaddshl(a, b, s)),
        (('isub', a, ('ishl(is_used_once)', b, s)), isubshl(a, b, s)),

        # ineg(x) is 0 - x
        (('ineg', ('ishl(is_used_once)', b, s)), isubshl(0, b, s)),

        # Definitions
        (imad(a, b, ('ishl(is_used_once)', c, s)), ('imadshl_agx', a, b, c, s)),
        (imsub(a, b, ('ishl(is_used_once)', c, s)), ('imsubshl_agx', a, b, c, s)),

        # a + (a << s) = a + a * (1 << s) = a * (1 + (1 << s))
        (('imul', a, 1 + (1 << s)), iaddshl(a, a, s)),

        # a - (a << s) = a - a * (1 << s) = a * (1 - (1 << s))
        (('imul', a, 1 - (1 << s)), isubshl(a, a, s)),

        # a - (a << s) = a * (1 - (1 << s)) = -(a * ((1 << s) - 1))
        (('ineg', ('imul(is_used_once)', a, (1 << s) - 1)), isubshl(a, a, s)),

        # iadd is SCIB, general shift is IC (slower)
        (('ishl', a, s), iaddshl(0, a, s)),
    ]
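
# Concretely, with s = 3: a * 9 fuses to iaddshl(a, a, 3) = a + (a << 3)
# since 1 + (1 << 3) == 9, and a << 3 itself becomes 0 + (a << 3) so it
# executes as an iadd (SCIB) rather than a general shift (IC).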

# Discard lowering generates this pattern, clean it up
ixor_bcsel = [
    (('ixor', ('bcsel', a, '#b', '#c'), '#d'),
     ('bcsel', a, ('ixor', b, d), ('ixor', c, d))),
]
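
# Because b, c and d are all constants ('#'), the two ixors can be folded at
# compile time, leaving a plain bcsel between two immediates.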

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--import-path', required=True)
    args = parser.parse_args()
    sys.path.insert(0, args.import_path)
    run()

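# The nir_algebraic import lives inside run() so that it only happens after
# main() has pushed --import-path (typically Mesa's src/compiler/nir, where
# nir_algebraic.py lives) onto sys.path.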
def run():
    import nir_algebraic  # pylint: disable=import-error

    print('#include "agx_nir.h"')

    print(nir_algebraic.AlgebraicPass("agx_nir_lower_algebraic_late",
                                      lower_sm5_shift + lower_pack +
                                      lower_selects).render())
    print(nir_algebraic.AlgebraicPass("agx_nir_fuse_algebraic_late",
                                      fuse_extr + fuse_ubfe + fuse_imad +
                                      ixor_bcsel).render())


if __name__ == '__main__':
    main()
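
# Example manual invocation (the build system normally drives this; the file
# names here are illustrative assumptions):
#   python3 agx_nir_algebraic.py -p $MESA/src/compiler/nir > agx_nir_algebraic.c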