// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Author : Stephen Smalley, <sds@tycho.nsa.gov>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"

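/* Map a SID to its hash bucket by masking off the low-order bits. */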
#define SIDTAB_HASH(sid) \
(sid & SIDTAB_HASH_MASK)

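/*
 * Initialize an empty SID table: allocate and clear the hash buckets
 * and start SID allocation at 1, leaving 0 for SECSID_NULL.
 */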
int sidtab_init(struct sidtab *s)
{
	int i;

	s->htable = kmalloc_array(SIDTAB_SIZE, sizeof(*s->htable), GFP_ATOMIC);
	if (!s->htable)
		return -ENOMEM;
	for (i = 0; i < SIDTAB_SIZE; i++)
		s->htable[i] = NULL;
	s->nel = 0;
	s->next_sid = 1;
	s->shutdown = 0;
	spin_lock_init(&s->lock);
	return 0;
}

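/*
 * Insert a copy of @context under @sid.  Each bucket chain is kept
 * sorted by SID, and the new node is published with a write barrier
 * so that readers traversing the chain without the lock never see a
 * partially initialized entry.  Returns -EEXIST if @sid is present.
 */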
int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
{
	int hvalue;
	struct sidtab_node *prev, *cur, *newnode;

	if (!s)
		return -ENOMEM;

	hvalue = SIDTAB_HASH(sid);
	prev = NULL;
	cur = s->htable[hvalue];
	while (cur && sid > cur->sid) {
		prev = cur;
		cur = cur->next;
	}

	if (cur && sid == cur->sid)
		return -EEXIST;

	newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC);
	if (!newnode)
		return -ENOMEM;

	newnode->sid = sid;
	if (context_cpy(&newnode->context, context)) {
		kfree(newnode);
		return -ENOMEM;
	}

	if (prev) {
		newnode->next = prev->next;
		wmb();
		prev->next = newnode;
	} else {
		newnode->next = s->htable[hvalue];
		wmb();
		s->htable[hvalue] = newnode;
	}

	s->nel++;
	if (sid >= s->next_sid)
		s->next_sid = sid + 1;
	return 0;
}

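/*
 * Look up the context for @sid.  Entries that are missing or that only
 * carry an unmapped context string fall back to the context of
 * SECINITSID_UNLABELED, unless @force is set, in which case a
 * string-only context is returned as-is.
 */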
static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
{
	int hvalue;
	struct sidtab_node *cur;

	if (!s)
		return NULL;

	hvalue = SIDTAB_HASH(sid);
	cur = s->htable[hvalue];
	while (cur && sid > cur->sid)
		cur = cur->next;

	if (force && cur && sid == cur->sid && cur->context.len)
		return &cur->context;

	if (!cur || sid != cur->sid || cur->context.len) {
		/* Remap invalid SIDs to the unlabeled SID. */
		sid = SECINITSID_UNLABELED;
		hvalue = SIDTAB_HASH(sid);
		cur = s->htable[hvalue];
		while (cur && sid > cur->sid)
			cur = cur->next;
		if (!cur || sid != cur->sid)
			return NULL;
	}

	return &cur->context;
}

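/* Look up @sid, remapping invalid entries to the unlabeled context. */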
struct context *sidtab_search(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

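/* Like sidtab_search(), but also return contexts stored only as strings. */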
struct context *sidtab_search_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}

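/*
 * Apply @apply to every (SID, context) pair in the table, stopping at
 * the first nonzero return value and propagating it to the caller.
 */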
int sidtab_map(struct sidtab *s,
	       int (*apply) (u32 sid,
			     struct context *context,
			     void *args),
	       void *args)
{
	int i, rc = 0;
	struct sidtab_node *cur;

	if (!s)
		goto out;

	for (i = 0; i < SIDTAB_SIZE; i++) {
		cur = s->htable[i];
		while (cur) {
			rc = apply(cur->sid, &cur->context, args);
			if (rc)
				goto out;
			cur = cur->next;
		}
	}
out:
	return rc;
}

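/* Promote @n to the front of the lookup cache, shifting slots 0..@loc-1 down. */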
static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc)
{
	BUG_ON(loc >= SIDTAB_CACHE_LEN);

	while (loc > 0) {
		s->cache[loc] = s->cache[loc - 1];
		loc--;
	}
	s->cache[0] = n;
}

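/*
 * Scan the whole table for an entry whose context matches @context,
 * promoting any hit into the lookup cache.  Returns the matching SID,
 * or 0 if none is found.
 */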
static inline u32 sidtab_search_context(struct sidtab *s,
					struct context *context)
{
	int i;
	struct sidtab_node *cur;

	for (i = 0; i < SIDTAB_SIZE; i++) {
		cur = s->htable[i];
		while (cur) {
			if (context_cmp(&cur->context, context)) {
				sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1);
				return cur->sid;
			}
			cur = cur->next;
		}
	}
	return 0;
}

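/*
 * Check the small cache of recently used nodes before resorting to a
 * full table scan.  Returns the matching SID, or 0 on a cache miss.
 */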
static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context)
{
	int i;
	struct sidtab_node *node;

	for (i = 0; i < SIDTAB_CACHE_LEN; i++) {
		node = s->cache[i];
		if (unlikely(!node))
			return 0;
		if (context_cmp(&node->context, context)) {
			sidtab_update_cache(s, node, i);
			return node->sid;
		}
	}
	return 0;
}

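/*
 * Return in @out_sid the SID for @context, allocating and inserting a
 * new entry under the table lock if the context is not yet present.
 */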
int sidtab_context_to_sid(struct sidtab *s,
			  struct context *context,
			  u32 *out_sid)
{
	u32 sid;
	int ret = 0;
	unsigned long flags;

	*out_sid = SECSID_NULL;

	sid = sidtab_search_cache(s, context);
	if (!sid)
		sid = sidtab_search_context(s, context);
	if (!sid) {
		spin_lock_irqsave(&s->lock, flags);
		/* Rescan now that we hold the lock. */
		sid = sidtab_search_context(s, context);
		if (sid)
			goto unlock_out;
		/* No SID exists for the context. Allocate a new one. */
		if (s->next_sid == UINT_MAX || s->shutdown) {
			ret = -ENOMEM;
			goto unlock_out;
		}
		sid = s->next_sid++;
		if (context->len)
			printk(KERN_INFO
			       "SELinux: Context %s is not valid (left unmapped).\n",
			       context->str);
		ret = sidtab_insert(s, sid, context);
		if (ret)
			s->next_sid--;
unlock_out:
		spin_unlock_irqrestore(&s->lock, flags);
	}

	if (ret)
		return ret;

	*out_sid = sid;
	return 0;
}

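/* Report hash table utilization: entries, buckets used, longest chain. */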
void sidtab_hash_eval(struct sidtab *h, char *tag)
{
	int i, chain_len, slots_used, max_chain_len;
	struct sidtab_node *cur;

	slots_used = 0;
	max_chain_len = 0;
	for (i = 0; i < SIDTAB_SIZE; i++) {
		cur = h->htable[i];
		if (cur) {
			slots_used++;
			chain_len = 0;
			while (cur) {
				chain_len++;
				cur = cur->next;
			}

			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
		}
	}

	printk(KERN_DEBUG "%s: %d entries and %d/%d buckets used, longest "
	       "chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
	       max_chain_len);
}

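/* Free every node and the bucket array, leaving the table reset. */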
void sidtab_destroy(struct sidtab *s)
{
	int i;
	struct sidtab_node *cur, *temp;

	if (!s)
		return;

	for (i = 0; i < SIDTAB_SIZE; i++) {
		cur = s->htable[i];
		while (cur) {
			temp = cur;
			cur = cur->next;
			context_destroy(&temp->context);
			kfree(temp);
		}
		s->htable[i] = NULL;
	}
	kfree(s->htable);
	s->htable = NULL;
	s->nel = 0;
	s->next_sid = 1;
}

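/*
 * Shallow-copy @src into @dst under @src's lock; the two tables then
 * share the same bucket array, and @dst starts with an empty cache.
 */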
void sidtab_set(struct sidtab *dst, struct sidtab *src)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&src->lock, flags);
	dst->htable = src->htable;
	dst->nel = src->nel;
	dst->next_sid = src->next_sid;
	dst->shutdown = 0;
	for (i = 0; i < SIDTAB_CACHE_LEN; i++)
		dst->cache[i] = NULL;
	spin_unlock_irqrestore(&src->lock, flags);
}

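/* Mark the table as shut down so that no further SIDs are handed out. */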
void sidtab_shutdown(struct sidtab *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->shutdown = 1;
	spin_unlock_irqrestore(&s->lock, flags);
}