// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies */

#include <linux/err.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>
#include <linux/hashtable.h>

#include "mapping.h"

/* Grace period, in msecs, that a released id stays resolvable when
 * delayed removal is enabled.
 */
#define MAPPING_GRACE_PERIOD 2000

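/*
 * A mapping context pairs two indexes over the same items: a hashtable
 * keyed on the data blob deduplicates identical additions, and an
 * xarray provides the reverse id -> item lookup used by mapping_find().
 */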
struct mapping_ctx {
	struct xarray xarray;
	DECLARE_HASHTABLE(ht, 8);
	struct mutex lock; /* Guards hashtable and xarray */
	unsigned long max_id;
	size_t data_size;
	bool delayed_removal;
	struct delayed_work dwork;
	struct list_head pending_list;
	spinlock_t pending_list_lock; /* Guards pending list */
};

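/*
 * One refcounted mapping entry. @cnt counts the callers sharing this
 * data -> id pair, @timeout is used only when delayed removal is
 * enabled, and @rcu lets mapping_find() readers proceed locklessly.
 */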
struct mapping_item {
	struct rcu_head rcu;
	struct list_head list;
	unsigned long timeout;
	struct hlist_node node;
	int cnt;
	u32 id;
	char data[];
};

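/*
 * Map @data to a unique id. If an identical blob is already mapped,
 * bump its reference count and return the existing id rather than
 * allocating a new entry.
 */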
int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id)
{
	struct mapping_item *mi;
	int err = -ENOMEM;
	u32 hash_key;

	mutex_lock(&ctx->lock);

	hash_key = jhash(data, ctx->data_size, 0);
	hash_for_each_possible(ctx->ht, mi, node, hash_key) {
		if (!memcmp(data, mi->data, ctx->data_size))
			goto attach;
	}

	mi = kzalloc(sizeof(*mi) + ctx->data_size, GFP_KERNEL);
	if (!mi)
		goto err_alloc;

	memcpy(mi->data, data, ctx->data_size);
	hash_add(ctx->ht, &mi->node, hash_key);

	err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id),
		       GFP_KERNEL);
	if (err)
		goto err_assign;
attach:
	++mi->cnt;
	*id = mi->id;

	mutex_unlock(&ctx->lock);

	return 0;

err_assign:
	hash_del(&mi->node);
	kfree(mi);
err_alloc:
	mutex_unlock(&ctx->lock);

	return err;
}

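/*
 * Drop the id -> item translation immediately; the item itself is
 * freed only after an RCU grace period, so concurrent mapping_find()
 * readers can safely finish with it.
 */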
static void mapping_remove_and_free(struct mapping_ctx *ctx,
				    struct mapping_item *mi)
{
	xa_erase(&ctx->xarray, mi->id);
	kfree_rcu(mi, rcu);
}

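/*
 * Free an item right away, or, when delayed removal is enabled, park
 * it on the pending list so lookups of its id keep succeeding for the
 * grace period.
 */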
static void mapping_free_item(struct mapping_ctx *ctx,
			      struct mapping_item *mi)
{
	if (!ctx->delayed_removal) {
		mapping_remove_and_free(ctx, mi);
		return;
	}

	mi->timeout = jiffies + msecs_to_jiffies(MAPPING_GRACE_PERIOD);

	spin_lock(&ctx->pending_list_lock);
	list_add_tail(&mi->list, &ctx->pending_list);
	spin_unlock(&ctx->pending_list_lock);

	/* MAPPING_GRACE_PERIOD is in msecs; convert to jiffies for the
	 * workqueue delay, matching the timeout set above.
	 */
	schedule_delayed_work(&ctx->dwork,
			      msecs_to_jiffies(MAPPING_GRACE_PERIOD));
}

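/*
 * Release one reference on @id; the entry is torn down only when the
 * last user removes it. Returns -ENOENT if the id is not mapped.
 */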
int mapping_remove(struct mapping_ctx *ctx, u32 id)
{
	unsigned long index = id;
	struct mapping_item *mi;
	int err = -ENOENT;

	mutex_lock(&ctx->lock);
	mi = xa_load(&ctx->xarray, index);
	if (!mi)
		goto out;
	err = 0;

	if (--mi->cnt > 0)
		goto out;

	hash_del(&mi->node);
	mapping_free_item(ctx, mi);
out:
	mutex_unlock(&ctx->lock);

	return err;
}

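/*
 * Translate @id back to its data, copying ctx->data_size bytes into
 * @data. The lookup is RCU-protected and lockless, so it is usable
 * from contexts that cannot take ctx->lock.
 */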
int mapping_find(struct mapping_ctx *ctx, u32 id, void *data)
{
	unsigned long index = id;
	struct mapping_item *mi;
	int err = -ENOENT;

	rcu_read_lock();
	mi = xa_load(&ctx->xarray, index);
	if (!mi)
		goto err_find;

	memcpy(data, mi->data, ctx->data_size);
	err = 0;

err_find:
	rcu_read_unlock();
	return err;
}

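/* Erase and free every item on an already-detached list. */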
static void
mapping_remove_and_free_list(struct mapping_ctx *ctx, struct list_head *list)
{
	struct mapping_item *mi;

	list_for_each_entry(mi, list, list)
		mapping_remove_and_free(ctx, mi);
}

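/*
 * Delayed work handler: move expired items off the pending list, free
 * them, and re-arm the work for the earliest remaining timeout.
 */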
static void mapping_work_handler(struct work_struct *work)
{
	unsigned long min_timeout = 0, now = jiffies;
	struct mapping_item *mi, *next;
	LIST_HEAD(pending_items);
	struct mapping_ctx *ctx;

	ctx = container_of(work, struct mapping_ctx, dwork.work);

	spin_lock(&ctx->pending_list_lock);
	list_for_each_entry_safe(mi, next, &ctx->pending_list, list) {
		if (time_after(now, mi->timeout))
			list_move(&mi->list, &pending_items);
		else if (!min_timeout ||
			 time_before(mi->timeout, min_timeout))
			min_timeout = mi->timeout;
	}
	spin_unlock(&ctx->pending_list_lock);

	mapping_remove_and_free_list(ctx, &pending_items);

	if (min_timeout)
		schedule_delayed_work(&ctx->dwork, abs(min_timeout - now));
}

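/*
 * Cancel the delayed work and reclaim everything still pending; called
 * on destroy so no items outlive the context.
 */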
static void mapping_flush_work(struct mapping_ctx *ctx)
{
	if (!ctx->delayed_removal)
		return;

	cancel_delayed_work_sync(&ctx->dwork);
	mapping_remove_and_free_list(ctx, &ctx->pending_list);
}

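/*
 * Allocate a context that maps fixed-size blobs of @data_size bytes to
 * ids in the range [1, @max_id] (UINT_MAX when @max_id is 0). With
 * @delayed_removal, released ids stay resolvable for the grace period.
 */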
struct mapping_ctx *
mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
{
	struct mapping_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_id = max_id ? max_id : UINT_MAX;
	ctx->data_size = data_size;

	if (delayed_removal) {
		INIT_DELAYED_WORK(&ctx->dwork, mapping_work_handler);
		INIT_LIST_HEAD(&ctx->pending_list);
		spin_lock_init(&ctx->pending_list_lock);
		ctx->delayed_removal = true;
	}

	mutex_init(&ctx->lock);
	xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1);

	return ctx;
}

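/* Tear down a context; all ids are expected to have been removed. */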
void mapping_destroy(struct mapping_ctx *ctx)
{
	mapping_flush_work(ctx);
	xa_destroy(&ctx->xarray);
	mutex_destroy(&ctx->lock);

	kfree(ctx);
}
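
/*
 * Illustrative usage sketch. This is not part of the original file;
 * the key struct and values below are hypothetical, only the mapping_*
 * calls are real.
 *
 *	struct my_key { u32 a, b; };
 *	struct my_key key = { .a = 1, .b = 2 }, out;
 *	struct mapping_ctx *ctx;
 *	u32 id, id2;
 *
 *	ctx = mapping_create(sizeof(key), 0, true);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	mapping_add(ctx, &key, &id);	// new entry, cnt = 1
 *	mapping_add(ctx, &key, &id2);	// same data: id2 == id, cnt = 2
 *	mapping_find(ctx, id, &out);	// copies the blob back into out
 *	mapping_remove(ctx, id);	// cnt = 1, entry stays
 *	mapping_remove(ctx, id);	// cnt = 0, freed after grace period
 *	mapping_destroy(ctx);
 */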