/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#ifndef _NIR_WORKLIST_
#define _NIR_WORKLIST_

#include "nir.h"
#include "util/set.h"
#include "util/u_vector.h"

#ifdef __cplusplus
extern "C" {
#endif
/** Represents a double-ended queue of unique blocks
 *
 * The worklist data structure guarantees that each block is in the queue at
 * most once.  Pushing a block onto either end of the queue is a no-op if
 * the block is already in the queue.  In order for this to work, the
 * caller must ensure that the blocks are properly indexed.
 */
typedef struct {
   /* The total size of the worklist */
   unsigned size;

   /* The number of blocks currently in the worklist */
   unsigned count;

   /* The offset in the array of blocks at which the list starts */
   unsigned start;

   /* A bitset of all of the blocks currently present in the worklist */
   BITSET_WORD *blocks_present;

   /* The actual worklist */
   nir_block **blocks;
} nir_block_worklist;

void nir_block_worklist_init(nir_block_worklist *w, unsigned num_blocks,
                             void *mem_ctx);
void nir_block_worklist_fini(nir_block_worklist *w);

void nir_block_worklist_add_all(nir_block_worklist *w, nir_function_impl *impl);

static inline bool
nir_block_worklist_is_empty(const nir_block_worklist *w)
{
   return w->count == 0;
}

void nir_block_worklist_push_head(nir_block_worklist *w, nir_block *block);

nir_block *nir_block_worklist_peek_head(const nir_block_worklist *w);

nir_block *nir_block_worklist_pop_head(nir_block_worklist *w);

void nir_block_worklist_push_tail(nir_block_worklist *w, nir_block *block);

nir_block *nir_block_worklist_peek_tail(const nir_block_worklist *w);

nir_block *nir_block_worklist_pop_tail(nir_block_worklist *w);
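
/*
 * Example usage (a minimal sketch, not taken from a real pass; it assumes
 * the block indices are up to date, e.g. via nir_metadata_require() with
 * nir_metadata_block_index):
 *
 *    nir_block_worklist worklist;
 *    nir_block_worklist_init(&worklist, impl->num_blocks, NULL);
 *    nir_block_worklist_add_all(&worklist, impl);
 *
 *    while (!nir_block_worklist_is_empty(&worklist)) {
 *       nir_block *block = nir_block_worklist_pop_head(&worklist);
 *
 *       // Process the block; if its analysis result changed, push the
 *       // blocks that depend on it so they get revisited.  Re-pushing a
 *       // block that is already queued is a no-op.
 *    }
 *
 *    nir_block_worklist_fini(&worklist);
 */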

/*
 * This worklist implementation, in contrast to the block worklist, does not
 * have unique entries, meaning a nir_instr can be inserted more than once
 * into the worklist.  It uses u_vector to keep the overhead and memory
 * footprint to a minimum.
 *
 * Making it unique by using a set was tested, but for the single use case
 * (nir_opt_dce) it did not improve speed.  There we check the pass_flag bit
 * and abort immediately if there's nothing to do, so the added overhead of
 * the set was higher than just processing the few extra entries.
 */

typedef struct {
   struct u_vector instr_vec;
} nir_instr_worklist;

static inline nir_instr_worklist *
nir_instr_worklist_create()
{
   nir_instr_worklist *wl = malloc(sizeof(nir_instr_worklist));
   if (!wl)
      return NULL;

   if (!u_vector_init_pow2(&wl->instr_vec, 8, sizeof(struct nir_instr *))) {
      free(wl);
      return NULL;
   }

   return wl;
}

static inline uint32_t
nir_instr_worklist_length(nir_instr_worklist *wl)
{
   return u_vector_length(&wl->instr_vec);
}

static inline bool
nir_instr_worklist_is_empty(nir_instr_worklist *wl)
{
   return nir_instr_worklist_length(wl) == 0;
}

static inline void
nir_instr_worklist_destroy(nir_instr_worklist *wl)
{
   u_vector_finish(&wl->instr_vec);
   free(wl);
}

static inline void
nir_instr_worklist_push_tail(nir_instr_worklist *wl, nir_instr *instr)
{
   struct nir_instr **vec_instr = u_vector_add(&wl->instr_vec);
   *vec_instr = instr;
}

static inline nir_instr *
nir_instr_worklist_pop_head(nir_instr_worklist *wl)
{
   struct nir_instr **vec_instr = u_vector_remove(&wl->instr_vec);

   if (vec_instr == NULL)
      return NULL;

   return *vec_instr;
}

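/* Push the instructions that define the SSA sources of instr onto the
 * worklist.
 */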
void
nir_instr_worklist_add_ssa_srcs(nir_instr_worklist *wl, nir_instr *instr);

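/* Iterate by popping instructions from the head until the worklist is
 * empty.  Note that the loop consumes (drains) the worklist.
 */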
#define nir_foreach_instr_in_worklist(instr, wl) \
   for (nir_instr *instr; (instr = nir_instr_worklist_pop_head(wl));)
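
/*
 * Example usage (a minimal sketch of a DCE-style liveness walk, assuming
 * every instruction's pass_flags bit has been cleared beforehand;
 * "seed_instr" stands in for pass-specific seeding logic):
 *
 *    nir_instr_worklist *wl = nir_instr_worklist_create();
 *    if (!wl)
 *       return false;
 *
 *    // Seed the worklist with instructions known to be live.
 *    nir_instr_worklist_push_tail(wl, seed_instr);
 *
 *    // Drain the worklist.  Entries are not unique, so check pass_flags
 *    // first and skip instructions that were already processed.
 *    nir_foreach_instr_in_worklist(instr, wl) {
 *       if (instr->pass_flags)
 *          continue;
 *       instr->pass_flags = 1;
 *       nir_instr_worklist_add_ssa_srcs(wl, instr);
 *    }
 *
 *    nir_instr_worklist_destroy(wl);
 */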

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* _NIR_WORKLIST_ */