/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Implementation of the extensible bitmap type.
 *
 * Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
 */
/*
 * Updated: Hewlett-Packard <paul@paul-moore.com>
 *          Added support to import/export the NetLabel category bitmap
 *          (c) Copyright Hewlett-Packard Development Company, L.P., 2006
 *
 * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
 *          Applied standard bit operations to improve bitmap scanning.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <net/netlabel.h>
#include "ebitmap.h"
#include "policydb.h"

#define BITS_PER_U64 (sizeof(u64) * 8)
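
/*
 * Each ebitmap_node covers one EBITMAP_SIZE-bit chunk of the bitmap:
 * node->startbit is the (EBITMAP_SIZE-aligned) first bit of the chunk and
 * node->maps[] holds its EBITMAP_UNIT_NUMS unsigned longs.  Nodes form a
 * singly linked list sorted by startbit, and e->highbit marks the end of
 * the highest node's range (0 for an empty bitmap).  The constants and
 * per-node helpers used below are defined in ebitmap.h.
 */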

static struct kmem_cache *ebitmap_node_cachep __ro_after_init;

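/* Return 1 if @e1 and @e2 contain exactly the same set of bits, else 0. */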
int ebitmap_cmp(const struct ebitmap *e1, const struct ebitmap *e2)
{
	const struct ebitmap_node *n1, *n2;

	if (e1->highbit != e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;
	while (n1 && n2 && (n1->startbit == n2->startbit) &&
	       !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
		n1 = n1->next;
		n2 = n2->next;
	}

	if (n1 || n2)
		return 0;

	return 1;
}

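/*
 * Deep-copy @src into @dst, one node at a time.  @dst is initialized here
 * and torn down again on allocation failure.  Returns 0 or -ENOMEM.
 */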
int ebitmap_cpy(struct ebitmap *dst, const struct ebitmap *src)
{
	struct ebitmap_node *new, *prev;
	const struct ebitmap_node *n;

	ebitmap_init(dst);
	n = src->node;
	prev = NULL;
	while (n) {
		new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
		if (!new) {
			ebitmap_destroy(dst);
			return -ENOMEM;
		}
		new->startbit = n->startbit;
		memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
		new->next = NULL;
		if (prev)
			prev->next = new;
		else
			dst->node = new;
		prev = new;
		n = n->next;
	}

	dst->highbit = src->highbit;
	return 0;
}

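/*
 * Set @dst to the intersection of @e1 and @e2, i.e. only the bits that are
 * set in both inputs.  Returns 0 on success or -ENOMEM.
 */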
int ebitmap_and(struct ebitmap *dst, const struct ebitmap *e1,
		const struct ebitmap *e2)
{
	struct ebitmap_node *n;
	int bit, rc;

	ebitmap_init(dst);

	ebitmap_for_each_positive_bit(e1, n, bit)
	{
		if (ebitmap_get_bit(e2, bit)) {
			rc = ebitmap_set_bit(dst, bit, 1);
			if (rc < 0)
				return rc;
		}
	}
	return 0;
}

#ifdef CONFIG_NETLABEL
/**
 * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
 * @ebmap: the ebitmap to export
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Export a SELinux extensible bitmap into a NetLabel category bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_export(struct ebitmap *ebmap,
			  struct netlbl_lsm_catmap **catmap)
{
	struct ebitmap_node *e_iter = ebmap->node;
	unsigned long e_map;
	u32 offset;
	unsigned int iter;
	int rc;

	if (e_iter == NULL) {
		*catmap = NULL;
		return 0;
	}

	if (*catmap != NULL)
		netlbl_catmap_free(*catmap);
	*catmap = NULL;

	while (e_iter) {
		offset = e_iter->startbit;
		for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
			e_map = e_iter->maps[iter];
			if (e_map != 0) {
				rc = netlbl_catmap_setlong(catmap, offset,
							   e_map, GFP_ATOMIC);
				if (rc != 0)
					goto netlbl_export_failure;
			}
			offset += EBITMAP_UNIT_SIZE;
		}
		e_iter = e_iter->next;
	}

	return 0;

netlbl_export_failure:
	netlbl_catmap_free(*catmap);
	return -ENOMEM;
}

/**
 * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
 * @ebmap: the ebitmap to import
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Import a NetLabel category bitmap into a SELinux extensible bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_import(struct ebitmap *ebmap,
			  struct netlbl_lsm_catmap *catmap)
{
	int rc;
	struct ebitmap_node *e_iter = NULL;
	struct ebitmap_node *e_prev = NULL;
	u32 offset = 0, idx;
	unsigned long bitmap;

	for (;;) {
		rc = netlbl_catmap_getlong(catmap, &offset, &bitmap);
		if (rc < 0)
			goto netlbl_import_failure;
		if (offset == (u32)-1)
			return 0;

		/* don't waste ebitmap space if the netlabel bitmap is empty */
		if (bitmap == 0) {
			offset += EBITMAP_UNIT_SIZE;
			continue;
		}

		if (e_iter == NULL ||
		    offset >= e_iter->startbit + EBITMAP_SIZE) {
			e_prev = e_iter;
			e_iter = kmem_cache_zalloc(ebitmap_node_cachep,
						   GFP_ATOMIC);
			if (e_iter == NULL)
				goto netlbl_import_failure;
			e_iter->startbit = offset - (offset % EBITMAP_SIZE);
			if (e_prev == NULL)
				ebmap->node = e_iter;
			else
				e_prev->next = e_iter;
			ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
		}

		/* offset will always be aligned to an unsigned long */
		idx = EBITMAP_NODE_INDEX(e_iter, offset);
		e_iter->maps[idx] = bitmap;

		/* next */
		offset += EBITMAP_UNIT_SIZE;
	}

	/* NOTE: we should never reach this return */
	return 0;

netlbl_import_failure:
	ebitmap_destroy(ebmap);
	return -ENOMEM;
}
#endif /* CONFIG_NETLABEL */

/*
 * Check to see if all the bits set in e2 are also set in e1. Optionally,
 * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
 * last_e2bit.
 */
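/*
 * For example, e2 = {1,3} is contained in e1 = {1,2,3}; with last_e2bit
 * set to 2 the same check fails, since e2's highest set bit (3) exceeds
 * the permitted maximum.
 */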
int ebitmap_contains(const struct ebitmap *e1, const struct ebitmap *e2,
		     u32 last_e2bit)
{
	const struct ebitmap_node *n1, *n2;
	int i;

	if (e1->highbit < e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;

	while (n1 && n2 && (n1->startbit <= n2->startbit)) {
		if (n1->startbit < n2->startbit) {
			n1 = n1->next;
			continue;
		}
		for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i];)
			i--; /* Skip trailing NULL map entries */
		if (last_e2bit && (i >= 0)) {
			u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
					 __fls(n2->maps[i]);
			if (lastsetbit > last_e2bit)
				return 0;
		}

		while (i >= 0) {
			if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
				return 0;
			i--;
		}

		n1 = n1->next;
		n2 = n2->next;
	}

	if (n2)
		return 0;

	return 1;
}

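/* Return the value of @bit in @e; bits beyond the mapped range read as 0. */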
int ebitmap_get_bit(const struct ebitmap *e, unsigned long bit)
{
	const struct ebitmap_node *n;

	if (e->highbit < bit)
		return 0;

	n = e->node;
	while (n && (n->startbit <= bit)) {
		if ((n->startbit + EBITMAP_SIZE) > bit)
			return ebitmap_node_get_bit(n, bit);
		n = n->next;
	}

	return 0;
}

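/*
 * Set (@value != 0) or clear @bit in @e, allocating a covering node on
 * demand and freeing a node once its last bit is cleared.  Returns 0 on
 * success or -ENOMEM if a new node cannot be allocated.
 *
 * Minimal usage sketch for a hypothetical caller (locking and most error
 * handling elided):
 *
 *	struct ebitmap map;
 *
 *	ebitmap_init(&map);
 *	if (ebitmap_set_bit(&map, 1024, 1))
 *		return -ENOMEM;
 *	WARN_ON(!ebitmap_get_bit(&map, 1024));
 *	ebitmap_destroy(&map);
 */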
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
{
	struct ebitmap_node *n, *prev, *new;

	prev = NULL;
	n = e->node;
	while (n && n->startbit <= bit) {
		if ((n->startbit + EBITMAP_SIZE) > bit) {
			if (value) {
				ebitmap_node_set_bit(n, bit);
			} else {
				unsigned int s;

				ebitmap_node_clr_bit(n, bit);

				s = find_first_bit(n->maps, EBITMAP_SIZE);
				if (s < EBITMAP_SIZE)
					return 0;

				/* drop this node from the bitmap */
				if (!n->next) {
					/*
					 * this was the highest map
					 * within the bitmap
					 */
					if (prev)
						e->highbit = prev->startbit +
							     EBITMAP_SIZE;
					else
						e->highbit = 0;
				}
				if (prev)
					prev->next = n->next;
				else
					e->node = n->next;
				kmem_cache_free(ebitmap_node_cachep, n);
			}
			return 0;
		}
		prev = n;
		n = n->next;
	}

	if (!value)
		return 0;

	new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	new->startbit = bit - (bit % EBITMAP_SIZE);
	ebitmap_node_set_bit(new, bit);

	if (!n)
		/* this node will be the highest map within the bitmap */
		e->highbit = new->startbit + EBITMAP_SIZE;

	if (prev) {
		new->next = prev->next;
		prev->next = new;
	} else {
		new->next = e->node;
		e->node = new;
	}

	return 0;
}

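/* Free all nodes of @e and reset it to the empty bitmap. */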
void ebitmap_destroy(struct ebitmap *e)
{
	struct ebitmap_node *n, *temp;

	if (!e)
		return;

	n = e->node;
	while (n) {
		temp = n;
		n = n->next;
		kmem_cache_free(ebitmap_node_cachep, temp);
	}

	e->highbit = 0;
	e->node = NULL;
}

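/*
 * Read an ebitmap from the binary policy image @fp.  The layout consumed
 * below is a three-word header followed by @count map units, all fields
 * little-endian:
 *
 *	u32 mapunit	bits per map unit, must equal BITS_PER_U64
 *	u32 highbit	exclusive upper bound on the set bits
 *	u32 count	number of (startbit, map) pairs that follow
 *	{ u32 startbit; u64 map; } x count
 *
 * Returns 0 on success or a negative errno on a truncated or malformed
 * bitmap.
 */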
int ebitmap_read(struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n = NULL;
	u32 mapunit, count, startbit, index;
	__le32 ebitmap_start;
	u64 map;
	__le64 mapbits;
	__le32 buf[3];
	int rc, i;

	ebitmap_init(e);

	rc = next_entry(buf, fp, sizeof buf);
	if (rc < 0)
		goto out;

	mapunit = le32_to_cpu(buf[0]);
	e->highbit = le32_to_cpu(buf[1]);
	count = le32_to_cpu(buf[2]);

	if (mapunit != BITS_PER_U64) {
		pr_err("SELinux: ebitmap: map size %u does not "
		       "match my size %zd (high bit was %d)\n",
		       mapunit, BITS_PER_U64, e->highbit);
		goto bad;
	}

	/* round up e->highbit */
	e->highbit += EBITMAP_SIZE - 1;
	e->highbit -= (e->highbit % EBITMAP_SIZE);

	if (!e->highbit) {
		e->node = NULL;
		goto ok;
	}

	if (e->highbit && !count)
		goto bad;

	for (i = 0; i < count; i++) {
		rc = next_entry(&ebitmap_start, fp, sizeof(u32));
		if (rc < 0) {
			pr_err("SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		startbit = le32_to_cpu(ebitmap_start);

		if (startbit & (mapunit - 1)) {
			pr_err("SELinux: ebitmap start bit (%d) is "
			       "not a multiple of the map unit size (%u)\n",
			       startbit, mapunit);
			goto bad;
		}
		if (startbit > e->highbit - mapunit) {
			pr_err("SELinux: ebitmap start bit (%d) is "
			       "beyond the end of the bitmap (%u)\n",
			       startbit, (e->highbit - mapunit));
			goto bad;
		}

		if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
			struct ebitmap_node *tmp;
			tmp = kmem_cache_zalloc(ebitmap_node_cachep,
						GFP_KERNEL);
			if (!tmp) {
				pr_err("SELinux: ebitmap: out of memory\n");
				rc = -ENOMEM;
				goto bad;
			}
			/* round down */
			tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
			if (n)
				n->next = tmp;
			else
				e->node = tmp;
			n = tmp;
		} else if (startbit <= n->startbit) {
			pr_err("SELinux: ebitmap: start bit %d"
			       " comes after start bit %d\n",
			       startbit, n->startbit);
			goto bad;
		}

		rc = next_entry(&mapbits, fp, sizeof(u64));
		if (rc < 0) {
			pr_err("SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		map = le64_to_cpu(mapbits);

		index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
		while (map) {
			n->maps[index++] = map & (-1UL);
			map = EBITMAP_SHIFT_UNIT_SIZE(map);
		}
	}
ok:
	rc = 0;
out:
	return rc;
bad:
	if (!rc)
		rc = -EINVAL;
	ebitmap_destroy(e);
	goto out;
}

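/*
 * Write @e to the binary policy image @fp in the format that ebitmap_read()
 * consumes: a first pass over the bitmap computes the header (unit size,
 * high bit, unit count), then a second pass emits a (startbit, 64-bit map)
 * pair for every u64-aligned chunk containing at least one set bit.
 * Returns 0 on success or the error from put_entry().
 */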
int ebitmap_write(const struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n;
	u32 count;
	__le32 buf[3];
	u64 map;
	int bit, last_bit, last_startbit, rc;

	buf[0] = cpu_to_le32(BITS_PER_U64);

	count = 0;
	last_bit = 0;
	last_startbit = -1;
	ebitmap_for_each_positive_bit(e, n, bit)
	{
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			count++;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		last_bit = roundup(bit + 1, BITS_PER_U64);
	}
	buf[1] = cpu_to_le32(last_bit);
	buf[2] = cpu_to_le32(count);

	rc = put_entry(buf, sizeof(u32), 3, fp);
	if (rc)
		return rc;

	map = 0;
	last_startbit = INT_MIN;
	ebitmap_for_each_positive_bit(e, n, bit)
	{
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			__le64 buf64[1];

			/* this is the very first bit */
			if (!map) {
				last_startbit = rounddown(bit, BITS_PER_U64);
				map = (u64)1 << (bit - last_startbit);
				continue;
			}

			/* write the last node */
			buf[0] = cpu_to_le32(last_startbit);
			rc = put_entry(buf, sizeof(u32), 1, fp);
			if (rc)
				return rc;

			buf64[0] = cpu_to_le64(map);
			rc = put_entry(buf64, sizeof(u64), 1, fp);
			if (rc)
				return rc;

			/* set up for the next node */
			map = 0;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		map |= (u64)1 << (bit - last_startbit);
	}
	/* write the last node */
	if (map) {
		__le64 buf64[1];

		/* write the last node */
		buf[0] = cpu_to_le32(last_startbit);
		rc = put_entry(buf, sizeof(u32), 1, fp);
		if (rc)
			return rc;

		buf64[0] = cpu_to_le64(map);
		rc = put_entry(buf64, sizeof(u64), 1, fp);
		if (rc)
			return rc;
	}
	return 0;
}

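/*
 * Mix @e into the Jenkins hash value @hash and return the result; even an
 * empty bitmap contributes its (zero) highbit so the caller's hash changes.
 */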
u32 ebitmap_hash(const struct ebitmap *e, u32 hash)
{
	struct ebitmap_node *node;

	/* need to change hash even if ebitmap is empty */
	hash = jhash_1word(e->highbit, hash);
	for (node = e->node; node; node = node->next) {
		hash = jhash_1word(node->startbit, hash);
		hash = jhash(node->maps, sizeof(node->maps), hash);
	}
	return hash;
}

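/*
 * Create the slab cache backing ebitmap_node allocations; SLAB_PANIC means
 * a failure here panics at boot instead of returning an error.
 */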
void __init ebitmap_cache_init(void)
{
	ebitmap_node_cachep = kmem_cache_create("ebitmap_node",
						sizeof(struct ebitmap_node), 0,
						SLAB_PANIC, NULL);
}
