/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_add_tail_rcu(node, head);
        spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_del_rcu(node);
        spin_unlock_irqrestore(lock, flags);
}

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        unsigned int order = hnae_page_order(ring);
        struct page *p = dev_alloc_pages(order);

        if (!p)
                return -ENOMEM;

        cb->priv = p;
        cb->page_offset = 0;
        cb->reuse_flag = 0;
        cb->buf = page_address(p);
        cb->length = hnae_page_size(ring);
        cb->type = DESC_TYPE_PAGE;

        return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        if (cb->type == DESC_TYPE_SKB)
                dev_kfree_skb_any((struct sk_buff *)cb->priv);
        else if (unlikely(is_rx_ring(ring)))
                put_page((struct page *)cb->priv);
        memset(cb, 0, sizeof(*cb));
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
                               cb->length, ring_to_dma_dir(ring));

        if (dma_mapping_error(ring_to_dev(ring), cb->dma))
                return -EIO;

        return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        /* skb buffers are mapped with dma_map_single() by the netdev
         * driver on tx, while page buffers are mapped with
         * dma_map_page() above, so the two are unmapped differently
         */
        if (cb->type == DESC_TYPE_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
        else
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
}

static struct hnae_buf_ops hnae_bops = {
        .alloc_buffer = hnae_alloc_buffer,
        .free_buffer = hnae_free_buffer,
        .map_buffer = hnae_map_buffer,
        .unmap_buffer = hnae_unmap_buffer,
};

static int __ae_match(struct device *dev, const void *data)
{
        struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
        const char *ae_id = data;

        if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE))
                return 1;

        return 0;
}

static struct hnae_ae_dev *find_ae(const char *ae_id)
{
        struct device *dev;

        WARN_ON(!ae_id);

        dev = class_find_device(hnae_class, NULL, ae_id, __ae_match);

        return dev ? cls_to_ae_dev(dev) : NULL;
}

static void hnae_free_buffers(struct hnae_ring *ring)
{
        int i;

        for (i = 0; i < ring->desc_num; i++)
                hnae_free_buffer_detach(ring, i);
}

/* Allocate and DMA-map a buffer for each descriptor in the ring */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
        int i, j, ret;

        for (i = 0; i < ring->desc_num; i++) {
                ret = hnae_alloc_buffer_attach(ring, i);
                if (ret)
                        goto out_buffer_fail;
        }

        return 0;

out_buffer_fail:
        for (j = i - 1; j >= 0; j--)
                hnae_free_buffer_detach(ring, j);
        return ret;
}

/* unmap the desc array from the device, then free it */
static void hnae_free_desc(struct hnae_ring *ring)
{
        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
                         ring->desc_num * sizeof(ring->desc[0]),
                         ring_to_dma_dir(ring));
        ring->desc_dma_addr = 0;
        kfree(ring->desc);
        ring->desc = NULL;
}

/* alloc the desc array and DMA-map it, without buffers attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
        int size = ring->desc_num * sizeof(ring->desc[0]);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
                ring->desc, size, ring_to_dma_dir(ring));
        if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* fini the ring, freeing the attached buffers if it is an rx ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
        if (is_rx_ring(ring))
                hnae_free_buffers(ring);

        hnae_free_desc(ring);
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;
}

/* init the ring, allocating buffers as well if it is an rx ring */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
        int ret;

        if (ring->desc_num <= 0 || ring->buf_size <= 0)
                return -EINVAL;

        ring->q = q;
        ring->flags = flags;
        assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

        /* no matter whether it is a tx or rx ring, ntc and ntu start from 0 */
        assert(ring->next_to_use == 0);
        assert(ring->next_to_clean == 0);

        ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
                                GFP_KERNEL);
        if (!ring->desc_cb) {
                ret = -ENOMEM;
                goto out;
        }

        ret = hnae_alloc_desc(ring);
        if (ret)
                goto out_with_desc_cb;

        if (is_rx_ring(ring)) {
                ret = hnae_alloc_buffers(ring);
                if (ret)
                        goto out_with_desc;
        }

        return 0;

out_with_desc:
        hnae_free_desc(ring);
out_with_desc_cb:
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
out:
        return ret;
}

static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
                           struct hnae_ae_dev *dev)
{
        int ret;

        q->dev = dev;
        q->handle = h;

        ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
        if (ret)
                goto out;

        ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
        if (ret)
                goto out_with_tx_ring;

        if (dev->ops->init_queue)
                dev->ops->init_queue(q);

        return 0;

out_with_tx_ring:
        hnae_fini_ring(&q->tx_ring);
out:
        return ret;
}

static void hnae_fini_queue(struct hnae_queue *q)
{
        if (q->dev->ops->fini_queue)
                q->dev->ops->fini_queue(q);

        hnae_fini_ring(&q->tx_ring);
        hnae_fini_ring(&q->rx_ring);
}

/**
 * ae_chain - the notifier chain head for AE register events
 */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
        if (raw_notifier_chain_unregister(&ae_chain, nb))
                pr_err("hnae: notifier chain unregister failed\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);
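
/*
 * Example: a consumer such as the ENET driver can use this chain to
 * learn when a new AE appears, so it can bind to devices registered
 * after it probed. A minimal sketch, not taken from a real caller;
 * hns_ae_event(), rescan_work and rescan_work_fn() are hypothetical
 * names:
 *
 *        static void rescan_work_fn(struct work_struct *work);
 *        static DECLARE_WORK(rescan_work, rescan_work_fn);
 *
 *        static int hns_ae_event(struct notifier_block *nb,
 *                                unsigned long action, void *data)
 *        {
 *                if (action == HNAE_AE_REGISTER)
 *                        schedule_work(&rescan_work);
 *                return NOTIFY_DONE;
 *        }
 *
 *        static struct notifier_block hns_ae_nb = {
 *                .notifier_call = hns_ae_event,
 *        };
 *
 *        err = hnae_register_notifier(&hns_ae_nb);
 *        ...
 *        hnae_unregister_notifier(&hns_ae_nb);
 */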

int hnae_reinit_handle(struct hnae_handle *handle)
{
        int i, j;
        int ret;

        for (i = 0; i < handle->q_num; i++) /* free rings */
                hnae_fini_queue(handle->qs[i]);

        if (handle->dev->ops->reset)
                handle->dev->ops->reset(handle);

        for (i = 0; i < handle->q_num; i++) { /* reinit rings */
                ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
                if (ret)
                        goto out_when_init_queue;
        }
        return 0;
out_when_init_queue:
        for (j = i - 1; j >= 0; j--)
                hnae_fini_queue(handle->qs[j]);
        return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the device that will use this handle
 * @ae_id: the id of the AE to be used
 * @port_id: the port id on the AE
 * @bops: the callbacks for buffer management, or NULL for the defaults
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
                                    const char *ae_id, u32 port_id,
                                    struct hnae_buf_ops *bops)
{
        struct hnae_ae_dev *dev;
        struct hnae_handle *handle;
        int i, j;
        int ret;

        dev = find_ae(ae_id);
        if (!dev)
                return ERR_PTR(-ENODEV);

        handle = dev->ops->get_handle(dev, port_id);
        if (IS_ERR(handle)) {
                put_device(&dev->cls_dev);
                return handle;
        }

        handle->dev = dev;
        handle->owner_dev = owner_dev;
        handle->bops = bops ? bops : &hnae_bops;
        handle->eport_id = port_id;

        for (i = 0; i < handle->q_num; i++) {
                ret = hnae_init_queue(handle, handle->qs[i], dev);
                if (ret)
                        goto out_when_init_queue;
        }

        __module_get(dev->owner);

        hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

        return handle;

out_when_init_queue:
        for (j = i - 1; j >= 0; j--)
                hnae_fini_queue(handle->qs[j]);

        put_device(&dev->cls_dev);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL(hnae_get_handle);
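
/*
 * Example: how a netdev driver might obtain and later release a
 * handle. A minimal sketch under assumed values: the AE name "hnae0"
 * and port id 0 would really come from the caller's DT/ACPI binding,
 * and pdev is the caller's platform device:
 *
 *        struct hnae_handle *h;
 *
 *        h = hnae_get_handle(&pdev->dev, "hnae0", 0, NULL);
 *        if (IS_ERR(h))
 *                return PTR_ERR(h);
 *        (... use h->qs[0 .. h->q_num - 1] for tx/rx ...)
 *        hnae_put_handle(h);
 *
 * Passing NULL for @bops selects the default page-based buffer
 * operations (hnae_bops) defined above.
 */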

void hnae_put_handle(struct hnae_handle *h)
{
        struct hnae_ae_dev *dev = h->dev;
        int i;

        for (i = 0; i < h->q_num; i++)
                hnae_fini_queue(h->qs[i]);

        if (h->dev->ops->reset)
                h->dev->ops->reset(h);

        hnae_list_del(&dev->lock, &h->node);

        if (dev->ops->put_handle)
                dev->ops->put_handle(h);

        module_put(dev->owner);

        put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);

static void hnae_release(struct device *dev)
{
        /* nothing to do: the hnae_ae_dev embedding this device is
         * owned and freed by the AE provider
         */
}

/**
 * hnae_ae_register - register an AE engine with the hnae framework
 * @hdev: the hnae AE engine device
 * @owner: the module that provides this device
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
        static atomic_t id = ATOMIC_INIT(-1);
        int ret;

        if (!hdev->dev)
                return -ENODEV;

        if (!hdev->ops || !hdev->ops->get_handle ||
            !hdev->ops->toggle_ring_irq ||
            !hdev->ops->toggle_queue_status ||
            !hdev->ops->get_status || !hdev->ops->adjust_link)
                return -EINVAL;

        hdev->owner = owner;
        hdev->id = (int)atomic_inc_return(&id);
        hdev->cls_dev.parent = hdev->dev;
        hdev->cls_dev.class = hnae_class;
        hdev->cls_dev.release = hnae_release;
        (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
        ret = device_register(&hdev->cls_dev);
        if (ret)
                return ret;

        __module_get(THIS_MODULE);

        INIT_LIST_HEAD(&hdev->handle_list);
        spin_lock_init(&hdev->lock);

        ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
        if (ret)
                dev_dbg(hdev->dev,
                        "has no notifier for AE: %s\n", hdev->name);

        return 0;
}
EXPORT_SYMBOL(hnae_ae_register);
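
/*
 * Example: how an AE provider (e.g. the DSAF driver) might register
 * itself. A minimal sketch; my_ae_ops, its handlers and ae_dev are
 * hypothetical names standing in for the provider's real ones:
 *
 *        static struct hnae_ae_ops my_ae_ops = {
 *                .get_handle = my_get_handle,
 *                .toggle_ring_irq = my_toggle_ring_irq,
 *                .toggle_queue_status = my_toggle_queue_status,
 *                .get_status = my_get_status,
 *                .adjust_link = my_adjust_link,
 *        };
 *
 *        ae_dev->dev = &pdev->dev;
 *        ae_dev->ops = &my_ae_ops;
 *        snprintf(ae_dev->name, AE_NAME_SIZE, "%s", "hnae0");
 *        ret = hnae_ae_register(ae_dev, THIS_MODULE);
 *
 * The five ops shown are the mandatory ones checked by
 * hnae_ae_register(); the rest (init_queue, fini_queue, reset, ...)
 * may be left NULL.
 */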

/**
 * hnae_ae_unregister - unregister a HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
        device_unregister(&hdev->cls_dev);
        module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
        hnae_class = class_create(THIS_MODULE, "hnae");
        return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
        class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */