1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Implementation of the SID table type.
4 *
5 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
6 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
7 *
8 * Copyright (C) 2018 Red Hat, Inc.
9 */
10 #include <linux/errno.h>
11 #include <linux/kernel.h>
12 #include <linux/slab.h>
13 #include <linux/sched.h>
14 #include <linux/spinlock.h>
15 #include <asm/barrier.h>
16 #include "flask.h"
17 #include "security.h"
18 #include "sidtab.h"
19
/*
 * Dynamic SIDs start right after the initial SIDs; SID 0 is reserved as
 * the invalid SID, hence the "+ 1" in both directions of the mapping.
 */
#define index_to_sid(index) (index + SECINITSID_NUM + 1)
#define sid_to_index(sid) (sid - (SECINITSID_NUM + 1))
22
sidtab_init(struct sidtab * s)23 int sidtab_init(struct sidtab *s)
24 {
25 u32 i;
26
27 memset(s->roots, 0, sizeof(s->roots));
28
29 for (i = 0; i < SECINITSID_NUM; i++)
30 s->isids[i].set = 0;
31
32 s->count = 0;
33 s->convert = NULL;
34 hash_init(s->context_to_sid);
35
36 spin_lock_init(&s->lock);
37 return 0;
38 }
39
/*
 * Look up the SID mapped to @context in the context->SID hashtable.
 *
 * Lock-free reader: runs under rcu_read_lock() and is safe against
 * concurrent insertions in sidtab_context_to_sid().  Returns 0 (the
 * invalid SID) when the context is not present.
 */
static u32 context_to_sid(struct sidtab *s, struct context *context)
{
	struct sidtab_entry_leaf *entry;
	u32 sid = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(s->context_to_sid, entry, list,
				   context->hash) {
		/* hashes can collide - confirm with a full comparison */
		if (context_cmp(&entry->context, context)) {
			sid = entry->sid;
			break;
		}
	}
	rcu_read_unlock();
	return sid;
}
56
/*
 * Assign @context to the initial SID @sid (1 <= sid <= SECINITSID_NUM).
 *
 * Copies the context into the per-isid slot and, unless an equal
 * context is already present, publishes the mapping in the
 * context->SID hashtable so reverse lookups can find it.
 *
 * Returns 0 on success, -EINVAL for an out-of-range SID, or the error
 * from context_cpy() on copy failure.
 */
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *entry;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	entry = &s->isids[sid - 1];

	rc = context_cpy(&entry->leaf.context, context);
	if (rc)
		return rc;

	/* mark the slot valid so sidtab_lookup_initial() will return it */
	entry->set = 1;

	/*
	 * Multiple initial sids may map to the same context. Check that this
	 * context is not already represented in the context_to_sid hashtable
	 * to avoid duplicate entries and long linked lists upon hash
	 * collision.
	 */
	if (!context_to_sid(s, context)) {
		entry->leaf.sid = sid;
		hash_add(s->context_to_sid, &entry->leaf.list, context->hash);
	}

	return 0;
}
86
sidtab_hash_stats(struct sidtab * sidtab,char * page)87 int sidtab_hash_stats(struct sidtab *sidtab, char *page)
88 {
89 int i;
90 int chain_len = 0;
91 int slots_used = 0;
92 int entries = 0;
93 int max_chain_len = 0;
94 int cur_bucket = 0;
95 struct sidtab_entry_leaf *entry;
96
97 rcu_read_lock();
98 hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
99 entries++;
100 if (i == cur_bucket) {
101 chain_len++;
102 if (chain_len == 1)
103 slots_used++;
104 } else {
105 cur_bucket = i;
106 if (chain_len > max_chain_len)
107 max_chain_len = chain_len;
108 chain_len = 0;
109 }
110 }
111 rcu_read_unlock();
112
113 if (chain_len > max_chain_len)
114 max_chain_len = chain_len;
115
116 return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
117 "longest chain: %d\n", entries,
118 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
119 }
120
/*
 * Return the depth (number of inner-node levels) of the smallest tree
 * able to hold @count entries; 0 means a single leaf node suffices.
 */
static u32 sidtab_level_from_count(u32 count)
{
	u32 level;
	u32 capacity = SIDTAB_LEAF_ENTRIES;

	/* each extra level multiplies capacity by SIDTAB_INNER_ENTRIES */
	for (level = 0; count > capacity; level++)
		capacity <<= SIDTAB_INNER_SHIFT;

	return level;
}
132
/*
 * Ensure root nodes exist for every tree depth from 0 up to @level,
 * allocating any that are missing (GFP_ATOMIC - callers may hold the
 * sidtab spinlock).  Each newly created inner root adopts the previous
 * root as its first child, growing the tree upwards in place.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}

	for (l = 1; l <= level; ++l) {
		if (s->roots[l].ptr_inner)
			continue;

		s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						GFP_ATOMIC);
		if (!s->roots[l].ptr_inner)
			return -ENOMEM;
		/* the old root becomes child 0 of the new, deeper root */
		s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
	}

	return 0;
}
153
/*
 * Walk (and, if @alloc, grow) the sidtab tree down to the leaf slot for
 * @index.
 *
 * With @alloc == 0 this is a pure lookup: returns NULL if any node on
 * the path is missing.  With @alloc != 0, missing root/inner/leaf nodes
 * are allocated with GFP_ATOMIC (callers may hold s->lock) and NULL is
 * returned only on allocation failure.
 */
static struct sidtab_entry_leaf *sidtab_do_lookup(struct sidtab *s, u32 index,
						  int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		/*
		 * Select the child whose subtree covers leaf_index, then
		 * reduce leaf_index to an offset within that subtree.
		 */
		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}
194
/*
 * Return the context stored at @index, or NULL if @index is out of
 * range.
 *
 * Lock-free reader path: the acquire load of s->count pairs with the
 * smp_store_release() in sidtab_context_to_sid(), so once an index is
 * below count, the tree nodes and entry contents written before the
 * release are guaranteed visible and the lookup below cannot fail.
 */
static struct context *sidtab_lookup(struct sidtab *s, u32 index)
{
	/* read entries only after reading count */
	u32 count = smp_load_acquire(&s->count);

	if (index >= count)
		return NULL;

	return &sidtab_do_lookup(s, index, 0)->context;
}
205
/*
 * Return the context of initial SID @sid (1-based), or NULL if it has
 * not been assigned via sidtab_set_initial() yet.
 */
static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	struct sidtab_isid_entry *entry = &s->isids[sid - 1];

	if (!entry->set)
		return NULL;

	return &entry->leaf.context;
}
210
/*
 * Resolve @sid to its context.  Falls back to the "unlabeled" initial
 * context when the SID is invalid/unknown, or when the entry only holds
 * an uninterpreted context string (context->len != 0) and @force is not
 * set.
 */
static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
{
	struct context *context = NULL;

	if (sid > SECINITSID_NUM)
		context = sidtab_lookup(s, sid_to_index(sid));
	else if (sid != 0)
		context = sidtab_lookup_initial(s, sid);

	if (context && (force || !context->len))
		return context;

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}
226
/*
 * Return the context for @sid; entries that only exist as a raw context
 * string are treated as unmapped (the "unlabeled" context is returned).
 */
struct context *sidtab_search(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}
231
/*
 * Like sidtab_search(), but also returns entries that only carry a raw
 * context string (context->len != 0, i.e. left unmapped by the policy).
 */
struct context *sidtab_search_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}
236
/*
 * Map @context to a SID, appending and publishing a new entry if the
 * context is not yet in the table.
 *
 * The fast path is a lock-free hashtable lookup.  On a miss the
 * IRQ-safe spinlock is taken, the lookup is repeated (another CPU may
 * have inserted the context meanwhile), and only then is a new entry
 * created at index s->count.  If a policy reload is in progress
 * (s->convert != NULL), the new entry is also converted and inserted
 * into the target sidtab so the new table stays complete.
 *
 * Returns 0 with *sid set on success; -EOVERFLOW when the table is
 * full; -ENOMEM on allocation failure; or the error from the convert
 * callback.
 */
int sidtab_context_to_sid(struct sidtab *s, struct context *context,
			  u32 *sid)
{
	unsigned long flags;
	u32 count;
	struct sidtab_convert_params *convert;
	struct sidtab_entry_leaf *dst, *dst_convert;
	int rc;

	*sid = context_to_sid(s, context);
	if (*sid)
		return 0;

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	rc = 0;
	*sid = context_to_sid(s, context);
	if (*sid)
		goto out_unlock;

	/* read entries only after reading count */
	count = smp_load_acquire(&s->count);
	convert = s->convert;

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	dst->sid = index_to_sid(count);

	rc = context_cpy(&dst->context, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	if (convert) {
		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(convert->target, count, 1);
		if (!dst_convert) {
			/* undo the copy so the slot stays unused */
			context_destroy(&dst->context);
			goto out_unlock;
		}

		rc = convert->func(context, &dst_convert->context,
				   convert->args);
		if (rc) {
			context_destroy(&dst->context);
			goto out_unlock;
		}
		dst_convert->sid = index_to_sid(count);
		convert->target->count = count + 1;

		hash_add_rcu(convert->target->context_to_sid,
			     &dst_convert->list, dst_convert->context.hash);
	}

	if (context->len)
		pr_info("SELinux: Context %s is not valid (left unmapped).\n",
			context->str);

	*sid = index_to_sid(count);

	/*
	 * write entries before updating count - pairs with the
	 * smp_load_acquire() in sidtab_lookup()
	 */
	smp_store_release(&s->count, count + 1);
	hash_add_rcu(s->context_to_sid, &dst->list, dst->context.hash);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}
319
/*
 * Populate the context->SID hashtable of a freshly converted sidtab @s:
 * assign each of the first @count tree entries its SID and insert it.
 * Called with the source sidtab's spinlock held (see sidtab_convert()).
 */
static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
	struct sidtab_entry_leaf *entry;
	u32 i;

	for (i = 0; i < count; i++) {
		/* entries 0..count-1 were allocated by the conversion */
		entry = sidtab_do_lookup(s, i, 0);
		entry->sid = index_to_sid(i);

		hash_add_rcu(s->context_to_sid, &entry->list,
			     entry->context.hash);

	}
}
334
/*
 * Recursively convert the subtree @esrc (at depth @level; 0 == leaf)
 * into @edst, allocating destination nodes as needed (GFP_KERNEL -
 * runs outside the spinlock).  *pos counts leaf entries converted so
 * far and bounds the walk at @count.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from the convert callback.
 */
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
					   &edst->ptr_leaf->entries[i].context,
					   convert->args);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		/* the table may be large - give the scheduler a chance */
		cond_resched();
	}

	return 0;
}
382
/*
 * Convert all entries of @s to a new policy as described by @params,
 * filling params->target.  The bulk of the tree is converted outside
 * the lock; s->convert is set first so that entries added concurrently
 * via sidtab_context_to_sid() are converted "live" into the target.
 *
 * Returns 0 on success, -EBUSY if another conversion is in progress,
 * -ENOMEM on allocation failure, or the error from the convert
 * callback (in which case the old table is kept intact).
 */
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 *
	 * NOTE(review): assumes count > 0; count - 1 would wrap for an
	 * empty table - confirm callers only convert populated sidtabs.
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely convert the tree outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux: Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}
	/*
	 * The hashtable can also be modified in sidtab_context_to_sid()
	 * so we must re-acquire the lock here.
	 */
	spin_lock_irqsave(&s->lock, flags);
	sidtab_convert_hashtable(params->target, count);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}
441
/*
 * Recursively free the subtree rooted at @entry, sitting at depth
 * @level (0 == leaf).  Leaf entries have their contexts destroyed
 * before the node is freed; NULL children are skipped.
 */
static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	struct sidtab_node_inner *inner;
	struct sidtab_node_leaf *leaf;
	u32 i;

	if (level == 0) {
		leaf = entry.ptr_leaf;
		if (!leaf)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			context_destroy(&leaf->entries[i].context);
		kfree(leaf);
		return;
	}

	inner = entry.ptr_inner;
	if (!inner)
		return;

	/* free children first, then the inner node itself */
	for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
		sidtab_destroy_tree(inner->entries[i], level - 1);
	kfree(inner);
}
466
/*
 * Free all contexts and tree nodes owned by @s.  Callers must ensure
 * the table is no longer reachable by concurrent lookups/insertions.
 */
void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			context_destroy(&s->isids[i].leaf.context);

	/* find the highest tree level actually allocated */
	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
	/*
	 * The context_to_sid hashtable's objects are all shared
	 * with the isids array and context tree, and so don't need
	 * to be cleaned up here.
	 */
}
486