// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the hash table type.
 *
 * Author : Stephen Smalley, <sds@tycho.nsa.gov>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include "hashtab.h"

static struct kmem_cache *hashtab_node_cachep;

/*
 * Here we simply round the number of elements up to the nearest power of two.
 * I also tried other options, like rounding down or rounding to the closest
 * power of two (up or down based on which is closer), but I was unable to
 * find any significant difference in lookup/insert performance that would
 * justify switching to a different (less intuitive) formula. It could be that
 * a different formula is actually better, but any future changes here should
 * be supported with performance/memory usage data.
 *
 * The total memory used by the htable arrays (only) with Fedora policy loaded
 * is approximately 163 KB at the time of writing.
 */
static u32 hashtab_compute_size(u32 nel)
{
	return nel == 0 ? 0 : roundup_pow_of_two(nel);
}

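/*
 * Illustrative sizing note (the hint value below is made up for this example,
 * not taken from any particular policy): passing nel_hint == 600 to
 * hashtab_init() yields roundup_pow_of_two(600) == 1024 buckets, while
 * nel_hint == 0 allocates no bucket array at all and leaves the table empty
 * (h->htable stays NULL and h->size stays 0).
 */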
int hashtab_init(struct hashtab *h, u32 nel_hint)
{
	u32 size = hashtab_compute_size(nel_hint);

	/* should already be zeroed, but better be safe */
	h->nel = 0;
	h->size = 0;
	h->htable = NULL;

	if (size) {
		h->htable = kcalloc(size, sizeof(*h->htable), GFP_KERNEL);
		if (!h->htable)
			return -ENOMEM;
		h->size = size;
	}
	return 0;
}

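/*
 * Low-level insert: dst must point at the head pointer of the bucket the key
 * hashes to, and the new node is pushed onto the front of that chain. Bucket
 * selection and any duplicate-key check are left to the caller (in mainline
 * kernels the hashtab_insert() inline wrapper in hashtab.h handles this).
 * A rough caller sketch, where hash_of() is only a placeholder for whatever
 * hash the key parameters provide, not a real helper:
 *
 *	u32 hvalue = hash_of(key) & (h->size - 1);
 *	rc = __hashtab_insert(h, &h->htable[hvalue], key, datum);
 */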
int __hashtab_insert(struct hashtab *h, struct hashtab_node **dst,
		     void *key, void *datum)
{
	struct hashtab_node *newnode;

	newnode = kmem_cache_zalloc(hashtab_node_cachep, GFP_KERNEL);
	if (!newnode)
		return -ENOMEM;
	newnode->key = key;
	newnode->datum = datum;
	newnode->next = *dst;
	*dst = newnode;

	h->nel++;
	return 0;
}

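/*
 * hashtab_destroy() frees the chain nodes and the bucket array itself, but
 * does not free the keys or datums the nodes point to; callers that own
 * heap-allocated keys/datums are expected to release those separately
 * (e.g. by walking the table with hashtab_map() first).
 */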
void hashtab_destroy(struct hashtab *h)
{
	u32 i;
	struct hashtab_node *cur, *temp;

	for (i = 0; i < h->size; i++) {
		cur = h->htable[i];
		while (cur) {
			temp = cur;
			cur = cur->next;
			kmem_cache_free(hashtab_node_cachep, temp);
		}
		h->htable[i] = NULL;
	}

	kfree(h->htable);
	h->htable = NULL;
}

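/*
 * hashtab_map() visits every (key, datum) pair and stops at the first apply()
 * callback that returns non-zero, propagating that value to the caller.
 * A minimal sketch of a callback (count_entry is invented for this example
 * and is not part of the API):
 *
 *	static int count_entry(void *k, void *d, void *args)
 *	{
 *		(*(u32 *)args)++;
 *		return 0;
 *	}
 *
 *	u32 n = 0;
 *	rc = hashtab_map(h, count_entry, &n);
 */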
int hashtab_map(struct hashtab *h,
		int (*apply)(void *k, void *d, void *args),
		void *args)
{
	u32 i;
	int ret;
	struct hashtab_node *cur;

	for (i = 0; i < h->size; i++) {
		cur = h->htable[i];
		while (cur) {
			ret = apply(cur->key, cur->datum, args);
			if (ret)
				return ret;
			cur = cur->next;
		}
	}
	return 0;
}

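/*
 * hashtab_stat() reports how many buckets are occupied and the length of the
 * longest chain; together with h->nel this is enough to judge how evenly the
 * hash function spreads keys (many used slots and a short maximum chain
 * indicate a healthy distribution).
 */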
void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
{
	u32 i, chain_len, slots_used, max_chain_len;
	struct hashtab_node *cur;

	slots_used = 0;
	max_chain_len = 0;
	for (i = 0; i < h->size; i++) {
		cur = h->htable[i];
		if (cur) {
			slots_used++;
			chain_len = 0;
			while (cur) {
				chain_len++;
				cur = cur->next;
			}

			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
		}
	}

	info->slots_used = slots_used;
	info->max_chain_len = max_chain_len;
}

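/*
 * hashtab_duplicate() deep-copies orig into new using the caller-supplied
 * copy() callback for each node; on failure, every node copied so far is torn
 * down again with the destroy() callback. The contract for the callbacks:
 * copy() fills in the new node's key/datum from the original and returns
 * non-zero on failure, and destroy() releases whatever copy() allocated for a
 * single key/datum pair.
 */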
int hashtab_duplicate(struct hashtab *new, struct hashtab *orig,
		int (*copy)(struct hashtab_node *new,
			struct hashtab_node *orig, void *args),
		int (*destroy)(void *k, void *d, void *args),
		void *args)
{
	struct hashtab_node *cur, *tmp, *tail;
	int i, rc;

	memset(new, 0, sizeof(*new));

	new->htable = kcalloc(orig->size, sizeof(*new->htable), GFP_KERNEL);
	if (!new->htable)
		return -ENOMEM;

	new->size = orig->size;

	for (i = 0; i < orig->size; i++) {
		tail = NULL;
		for (cur = orig->htable[i]; cur; cur = cur->next) {
			tmp = kmem_cache_zalloc(hashtab_node_cachep,
						GFP_KERNEL);
			if (!tmp)
				goto error;
			rc = copy(tmp, cur, args);
			if (rc) {
				kmem_cache_free(hashtab_node_cachep, tmp);
				goto error;
			}
			tmp->next = NULL;
			if (!tail)
				new->htable[i] = tmp;
			else
				tail->next = tmp;
			tail = tmp;
			new->nel++;
		}
	}

	return 0;

 error:
	for (i = 0; i < new->size; i++) {
		for (cur = new->htable[i]; cur; cur = tmp) {
			tmp = cur->next;
			destroy(cur->key, cur->datum, args);
			kmem_cache_free(hashtab_node_cachep, cur);
		}
	}
	/*
	 * 'new' itself is caller-owned and was not allocated from
	 * hashtab_node_cachep, so only the bucket array is freed here.
	 */
	kfree(new->htable);
	memset(new, 0, sizeof(*new));
	return -ENOMEM;
}

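/*
 * The node cache is created with SLAB_PANIC, so failure to set it up at boot
 * is treated as fatal rather than being reported to the caller.
 */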
void __init hashtab_cache_init(void)
{
	hashtab_node_cachep = kmem_cache_create("hashtab_node",
						sizeof(struct hashtab_node),
						0, SLAB_PANIC, NULL);
}