1 /*
2  * Keystone Queue Manager subsystem driver
3  *
4  * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
5  * Authors:	Sandeep Nair <sandeep_n@ti.com>
6  *		Cyril Chemparathy <cyril@ti.com>
7  *		Santosh Shilimkar <santosh.shilimkar@ti.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * version 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  */
18 
19 #include <linux/debugfs.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/firmware.h>
22 #include <linux/interrupt.h>
23 #include <linux/io.h>
24 #include <linux/module.h>
25 #include <linux/of_address.h>
26 #include <linux/of_device.h>
27 #include <linux/of_irq.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/soc/ti/knav_qmss.h>
31 
32 #include "knav_qmss.h"
33 
34 static struct knav_device *kdev;
35 static DEFINE_MUTEX(knav_dev_lock);
36 
37 /* Queue manager register indices in DTS */
38 #define KNAV_QUEUE_PEEK_REG_INDEX	0
39 #define KNAV_QUEUE_STATUS_REG_INDEX	1
40 #define KNAV_QUEUE_CONFIG_REG_INDEX	2
41 #define KNAV_QUEUE_REGION_REG_INDEX	3
42 #define KNAV_QUEUE_PUSH_REG_INDEX	4
43 #define KNAV_QUEUE_POP_REG_INDEX	5
44 
45 /* PDSP register indices in DTS */
46 #define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
47 #define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
48 #define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
49 #define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3
50 
51 #define knav_queue_idx_to_inst(kdev, idx)			\
52 	(kdev->instances + (idx << kdev->inst_shift))
53 
54 #define for_each_handle_rcu(qh, inst)			\
55 	list_for_each_entry_rcu(qh, &inst->handles, list)
56 
57 #define for_each_instance(idx, inst, kdev)		\
58 	for (idx = 0, inst = kdev->instances;		\
59 	     idx < (kdev)->num_queues_in_use;			\
60 	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))
61 
62 /* All firmware file names are listed here. List the newest firmware
63  * first, followed by older ones. The array is searched from the start
64  * until a firmware file is found.
65  */
66 const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
67 
68 /**
69  * knav_queue_notify: qmss queue notifier call
70  *
71  * @inst:		qmss queue instance like accumulator
72  */
73 void knav_queue_notify(struct knav_queue_inst *inst)
74 {
75 	struct knav_queue *qh;
76 
77 	if (!inst)
78 		return;
79 
80 	rcu_read_lock();
81 	for_each_handle_rcu(qh, inst) {
82 		if (atomic_read(&qh->notifier_enabled) <= 0)
83 			continue;
84 		if (WARN_ON(!qh->notifier_fn))
85 			continue;
86 		atomic_inc(&qh->stats.notifies);
87 		qh->notifier_fn(qh->notifier_fn_arg);
88 	}
89 	rcu_read_unlock();
90 }
91 EXPORT_SYMBOL_GPL(knav_queue_notify);
92 
93 static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
94 {
95 	struct knav_queue_inst *inst = _instdata;
96 
97 	knav_queue_notify(inst);
98 	return IRQ_HANDLED;
99 }
100 
101 static int knav_queue_setup_irq(struct knav_range_info *range,
102 			  struct knav_queue_inst *inst)
103 {
104 	unsigned queue = inst->id - range->queue_base;
105 	int ret = 0, irq;
106 
107 	if (range->flags & RANGE_HAS_IRQ) {
108 		irq = range->irqs[queue].irq;
109 		ret = request_irq(irq, knav_queue_int_handler, 0,
110 					inst->irq_name, inst);
111 		if (ret)
112 			return ret;
113 		disable_irq(irq);
114 		if (range->irqs[queue].cpu_mask) {
115 			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
116 			if (ret) {
117 				dev_warn(range->kdev->dev,
118 					 "Failed to set IRQ affinity\n");
				free_irq(irq, inst);
119 				return ret;
120 			}
121 		}
122 	}
123 	return ret;
124 }
125 
126 static void knav_queue_free_irq(struct knav_queue_inst *inst)
127 {
128 	struct knav_range_info *range = inst->range;
129 	unsigned queue = inst->id - inst->range->queue_base;
130 	int irq;
131 
132 	if (range->flags & RANGE_HAS_IRQ) {
133 		irq = range->irqs[queue].irq;
134 		irq_set_affinity_hint(irq, NULL);
135 		free_irq(irq, inst);
136 	}
137 }
138 
139 static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
140 {
141 	return !list_empty(&inst->handles);
142 }
143 
144 static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
145 {
146 	return inst->range->flags & RANGE_RESERVED;
147 }
148 
149 static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
150 {
151 	struct knav_queue *tmp;
152 
153 	rcu_read_lock();
154 	for_each_handle_rcu(tmp, inst) {
155 		if (tmp->flags & KNAV_QUEUE_SHARED) {
156 			rcu_read_unlock();
157 			return true;
158 		}
159 	}
160 	rcu_read_unlock();
161 	return false;
162 }
163 
164 static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
165 						unsigned type)
166 {
167 	if ((type == KNAV_QUEUE_QPEND) &&
168 	    (inst->range->flags & RANGE_HAS_IRQ)) {
169 		return true;
170 	} else if ((type == KNAV_QUEUE_ACC) &&
171 		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
172 		return true;
173 	} else if ((type == KNAV_QUEUE_GP) &&
174 		!(inst->range->flags &
175 			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
176 		return true;
177 	}
178 	return false;
179 }
180 
181 static inline struct knav_queue_inst *
182 knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
183 {
184 	struct knav_queue_inst *inst;
185 	int idx;
186 
187 	for_each_instance(idx, inst, kdev) {
188 		if (inst->id == id)
189 			return inst;
190 	}
191 	return NULL;
192 }
193 
194 static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
195 {
196 	if (kdev->base_id <= id &&
197 	    kdev->base_id + kdev->num_queues > id) {
198 		id -= kdev->base_id;
199 		return knav_queue_match_id_to_inst(kdev, id);
200 	}
201 	return NULL;
202 }
203 
204 static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
205 				      const char *name, unsigned flags)
206 {
207 	struct knav_queue *qh;
208 	unsigned id;
209 	int ret = 0;
210 
211 	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
212 	if (!qh)
213 		return ERR_PTR(-ENOMEM);
214 
215 	qh->flags = flags;
216 	qh->inst = inst;
217 	id = inst->id - inst->qmgr->start_queue;
218 	qh->reg_push = &inst->qmgr->reg_push[id];
219 	qh->reg_pop = &inst->qmgr->reg_pop[id];
220 	qh->reg_peek = &inst->qmgr->reg_peek[id];
221 
222 	/* first opener? */
223 	if (!knav_queue_is_busy(inst)) {
224 		struct knav_range_info *range = inst->range;
225 
226 		inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
227 		if (range->ops && range->ops->open_queue)
228 			ret = range->ops->open_queue(range, inst, flags);
229 
230 		if (ret) {
231 			devm_kfree(inst->kdev->dev, qh);
232 			return ERR_PTR(ret);
233 		}
234 	}
235 	list_add_tail_rcu(&qh->list, &inst->handles);
236 	return qh;
237 }
238 
239 static struct knav_queue *
240 knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
241 {
242 	struct knav_queue_inst *inst;
243 	struct knav_queue *qh;
244 
245 	mutex_lock(&knav_dev_lock);
246 
247 	qh = ERR_PTR(-ENODEV);
248 	inst = knav_queue_find_by_id(id);
249 	if (!inst)
250 		goto unlock_ret;
251 
252 	qh = ERR_PTR(-EEXIST);
253 	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
254 		goto unlock_ret;
255 
256 	qh = ERR_PTR(-EBUSY);
257 	if ((flags & KNAV_QUEUE_SHARED) &&
258 	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
259 		goto unlock_ret;
260 
261 	qh = __knav_queue_open(inst, name, flags);
262 
263 unlock_ret:
264 	mutex_unlock(&knav_dev_lock);
265 
266 	return qh;
267 }
268 
269 static struct knav_queue *knav_queue_open_by_type(const char *name,
270 						unsigned type, unsigned flags)
271 {
272 	struct knav_queue_inst *inst;
273 	struct knav_queue *qh = ERR_PTR(-EINVAL);
274 	int idx;
275 
276 	mutex_lock(&knav_dev_lock);
277 
278 	for_each_instance(idx, inst, kdev) {
279 		if (knav_queue_is_reserved(inst))
280 			continue;
281 		if (!knav_queue_match_type(inst, type))
282 			continue;
283 		if (knav_queue_is_busy(inst))
284 			continue;
285 		qh = __knav_queue_open(inst, name, flags);
286 		goto unlock_ret;
287 	}
288 
289 unlock_ret:
290 	mutex_unlock(&knav_dev_lock);
291 	return qh;
292 }
293 
294 static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
295 {
296 	struct knav_range_info *range = inst->range;
297 
298 	if (range->ops && range->ops->set_notify)
299 		range->ops->set_notify(range, inst, enabled);
300 }
301 
302 static int knav_queue_enable_notifier(struct knav_queue *qh)
303 {
304 	struct knav_queue_inst *inst = qh->inst;
305 	bool first;
306 
307 	if (WARN_ON(!qh->notifier_fn))
308 		return -EINVAL;
309 
310 	/* Adjust the per handle notifier count */
311 	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
312 	if (!first)
313 		return 0; /* nothing to do */
314 
315 	/* Now adjust the per instance notifier count */
316 	first = (atomic_inc_return(&inst->num_notifiers) == 1);
317 	if (first)
318 		knav_queue_set_notify(inst, true);
319 
320 	return 0;
321 }
322 
323 static int knav_queue_disable_notifier(struct knav_queue *qh)
324 {
325 	struct knav_queue_inst *inst = qh->inst;
326 	bool last;
327 
328 	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
329 	if (!last)
330 		return 0; /* nothing to do */
331 
332 	last = (atomic_dec_return(&inst->num_notifiers) == 0);
333 	if (last)
334 		knav_queue_set_notify(inst, false);
335 
336 	return 0;
337 }
338 
339 static int knav_queue_set_notifier(struct knav_queue *qh,
340 				struct knav_queue_notify_config *cfg)
341 {
342 	knav_queue_notify_fn old_fn = qh->notifier_fn;
343 
344 	if (!cfg)
345 		return -EINVAL;
346 
347 	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
348 		return -ENOTSUPP;
349 
350 	if (!cfg->fn && old_fn)
351 		knav_queue_disable_notifier(qh);
352 
353 	qh->notifier_fn = cfg->fn;
354 	qh->notifier_fn_arg = cfg->fn_arg;
355 
356 	if (cfg->fn && !old_fn)
357 		knav_queue_enable_notifier(qh);
358 
359 	return 0;
360 }
361 
362 static int knav_gp_set_notify(struct knav_range_info *range,
363 			       struct knav_queue_inst *inst,
364 			       bool enabled)
365 {
366 	unsigned queue;
367 
368 	if (range->flags & RANGE_HAS_IRQ) {
369 		queue = inst->id - range->queue_base;
370 		if (enabled)
371 			enable_irq(range->irqs[queue].irq);
372 		else
373 			disable_irq_nosync(range->irqs[queue].irq);
374 	}
375 	return 0;
376 }
377 
378 static int knav_gp_open_queue(struct knav_range_info *range,
379 				struct knav_queue_inst *inst, unsigned flags)
380 {
381 	return knav_queue_setup_irq(range, inst);
382 }
383 
384 static int knav_gp_close_queue(struct knav_range_info *range,
385 				struct knav_queue_inst *inst)
386 {
387 	knav_queue_free_irq(inst);
388 	return 0;
389 }
390 
391 struct knav_range_ops knav_gp_range_ops = {
392 	.set_notify	= knav_gp_set_notify,
393 	.open_queue	= knav_gp_open_queue,
394 	.close_queue	= knav_gp_close_queue,
395 };
396 
397 
398 static int knav_queue_get_count(void *qhandle)
399 {
400 	struct knav_queue *qh = qhandle;
401 	struct knav_queue_inst *inst = qh->inst;
402 
403 	return readl_relaxed(&qh->reg_peek[0].entry_count) +
404 		atomic_read(&inst->desc_count);
405 }
406 
407 static void knav_queue_debug_show_instance(struct seq_file *s,
408 					struct knav_queue_inst *inst)
409 {
410 	struct knav_device *kdev = inst->kdev;
411 	struct knav_queue *qh;
412 
413 	if (!knav_queue_is_busy(inst))
414 		return;
415 
416 	seq_printf(s, "\tqueue id %d (%s)\n",
417 		   kdev->base_id + inst->id, inst->name);
418 	for_each_handle_rcu(qh, inst) {
419 		seq_printf(s, "\t\thandle %p: ", qh);
420 		seq_printf(s, "pushes %8d, ",
421 			   atomic_read(&qh->stats.pushes));
422 		seq_printf(s, "pops %8d, ",
423 			   atomic_read(&qh->stats.pops));
424 		seq_printf(s, "count %8d, ",
425 			   knav_queue_get_count(qh));
426 		seq_printf(s, "notifies %8d, ",
427 			   atomic_read(&qh->stats.notifies));
428 		seq_printf(s, "push errors %8d, ",
429 			   atomic_read(&qh->stats.push_errors));
430 		seq_printf(s, "pop errors %8d\n",
431 			   atomic_read(&qh->stats.pop_errors));
432 	}
433 }
434 
435 static int knav_queue_debug_show(struct seq_file *s, void *v)
436 {
437 	struct knav_queue_inst *inst;
438 	int idx;
439 
440 	mutex_lock(&knav_dev_lock);
441 	seq_printf(s, "%s: %u-%u\n",
442 		   dev_name(kdev->dev), kdev->base_id,
443 		   kdev->base_id + kdev->num_queues - 1);
444 	for_each_instance(idx, inst, kdev)
445 		knav_queue_debug_show_instance(s, inst);
446 	mutex_unlock(&knav_dev_lock);
447 
448 	return 0;
449 }
450 
451 static int knav_queue_debug_open(struct inode *inode, struct file *file)
452 {
453 	return single_open(file, knav_queue_debug_show, NULL);
454 }
455 
456 static const struct file_operations knav_queue_debug_ops = {
457 	.open		= knav_queue_debug_open,
458 	.read		= seq_read,
459 	.llseek		= seq_lseek,
460 	.release	= single_release,
461 };
462 
463 static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
464 					u32 flags)
465 {
466 	unsigned long end;
467 	u32 val = 0;
468 
469 	end = jiffies + msecs_to_jiffies(timeout);
470 	while (time_after(end, jiffies)) {
471 		val = readl_relaxed(addr);
472 		if (flags)
473 			val &= flags;
474 		if (!val)
475 			break;
476 		cpu_relax();
477 	}
478 	return val ? -ETIMEDOUT : 0;
479 }
480 
481 
482 static int knav_queue_flush(struct knav_queue *qh)
483 {
484 	struct knav_queue_inst *inst = qh->inst;
485 	unsigned id = inst->id - inst->qmgr->start_queue;
486 
487 	atomic_set(&inst->desc_count, 0);
488 	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
489 	return 0;
490 }
491 
492 /**
493  * knav_queue_open()	- open a hardware queue
494  * @name		- name to give the queue handle
495  * @id			- desired queue number if any or specifies the type
496  *			  of queue
497  * @flags		- the following flags are applicable to queues:
498  *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
499  *			     exclusive by default.
500  *			     Subsequent attempts to open a shared queue should
501  *			     also have this flag.
502  *
503  * Returns a handle to the open hardware queue if successful. Use IS_ERR()
504  * to check the returned value for error codes.
505  */
506 void *knav_queue_open(const char *name, unsigned id,
507 					unsigned flags)
508 {
509 	struct knav_queue *qh = ERR_PTR(-EINVAL);
510 
511 	switch (id) {
512 	case KNAV_QUEUE_QPEND:
513 	case KNAV_QUEUE_ACC:
514 	case KNAV_QUEUE_GP:
515 		qh = knav_queue_open_by_type(name, id, flags);
516 		break;
517 
518 	default:
519 		qh = knav_queue_open_by_id(name, id, flags);
520 		break;
521 	}
522 	return qh;
523 }
524 EXPORT_SYMBOL_GPL(knav_queue_open);
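/*
 * Usage sketch (illustrative only, not part of the driver): how a client
 * driver might open a general purpose queue by type and a specific queue
 * by id. The handle names and the queue id 650 are hypothetical; only
 * IS_ERR() checking is shown.
 *
 *	void *tx_q, *rx_q;
 *
 *	tx_q = knav_queue_open("my-tx", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(tx_q))
 *		return PTR_ERR(tx_q);
 *
 *	rx_q = knav_queue_open("my-rx", 650, KNAV_QUEUE_SHARED);
 *	if (IS_ERR(rx_q)) {
 *		knav_queue_close(tx_q);
 *		return PTR_ERR(rx_q);
 *	}
 */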
525 
526 /**
527  * knav_queue_close()	- close a hardware queue handle
528  * @qh			- handle to close
529  */
530 void knav_queue_close(void *qhandle)
531 {
532 	struct knav_queue *qh = qhandle;
533 	struct knav_queue_inst *inst = qh->inst;
534 
535 	while (atomic_read(&qh->notifier_enabled) > 0)
536 		knav_queue_disable_notifier(qh);
537 
538 	mutex_lock(&knav_dev_lock);
539 	list_del_rcu(&qh->list);
540 	mutex_unlock(&knav_dev_lock);
541 	synchronize_rcu();
542 	if (!knav_queue_is_busy(inst)) {
543 		struct knav_range_info *range = inst->range;
544 
545 		if (range->ops && range->ops->close_queue)
546 			range->ops->close_queue(range, inst);
547 	}
548 	devm_kfree(inst->kdev->dev, qh);
549 }
550 EXPORT_SYMBOL_GPL(knav_queue_close);
551 
552 /**
553  * knav_queue_device_control()	- Perform control operations on a queue
554  * @qh				- queue handle
555  * @cmd				- control commands
556  * @arg				- command argument
557  *
558  * Returns 0 on success, errno otherwise.
559  */
560 int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
561 				unsigned long arg)
562 {
563 	struct knav_queue *qh = qhandle;
564 	struct knav_queue_notify_config *cfg;
565 	int ret;
566 
567 	switch ((int)cmd) {
568 	case KNAV_QUEUE_GET_ID:
569 		ret = qh->inst->kdev->base_id + qh->inst->id;
570 		break;
571 
572 	case KNAV_QUEUE_FLUSH:
573 		ret = knav_queue_flush(qh);
574 		break;
575 
576 	case KNAV_QUEUE_SET_NOTIFIER:
577 		cfg = (void *)arg;
578 		ret = knav_queue_set_notifier(qh, cfg);
579 		break;
580 
581 	case KNAV_QUEUE_ENABLE_NOTIFY:
582 		ret = knav_queue_enable_notifier(qh);
583 		break;
584 
585 	case KNAV_QUEUE_DISABLE_NOTIFY:
586 		ret = knav_queue_disable_notifier(qh);
587 		break;
588 
589 	case KNAV_QUEUE_GET_COUNT:
590 		ret = knav_queue_get_count(qh);
591 		break;
592 
593 	default:
594 		ret = -ENOTSUPP;
595 		break;
596 	}
597 	return ret;
598 }
599 EXPORT_SYMBOL_GPL(knav_queue_device_control);
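/*
 * Usage sketch (illustrative only, not part of the driver): arming a
 * notification callback on an open queue handle. The callback my_rx_notify
 * and its argument my_priv are hypothetical; the callback takes the fn_arg
 * pointer and returns void (see knav_queue_notify() above).
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_rx_notify,
 *		.fn_arg	= my_priv,
 *	};
 *
 *	ret = knav_queue_device_control(rx_q, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 */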
600 
601 
602 
603 /**
604  * knav_queue_push()	- push data (or descriptor) to the tail of a queue
605  * @qh			- hardware queue handle
606  * @data		- data to push
607  * @size		- size of data to push
608  * @flags		- can be used to pass additional information
609  *
610  * Returns 0 on success, errno otherwise.
611  */
612 int knav_queue_push(void *qhandle, dma_addr_t dma,
613 					unsigned size, unsigned flags)
614 {
615 	struct knav_queue *qh = qhandle;
616 	u32 val;
617 
618 	val = (u32)dma | ((size / 16) - 1);
619 	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
620 
621 	atomic_inc(&qh->stats.pushes);
622 	return 0;
623 }
624 EXPORT_SYMBOL_GPL(knav_queue_push);
625 
626 /**
627  * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
628  * @qh			- hardware queue handle
629  * @size		- (optional) size of the data popped.
630  *
631  * Returns a DMA address on success, 0 on failure.
632  */
633 dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
634 {
635 	struct knav_queue *qh = qhandle;
636 	struct knav_queue_inst *inst = qh->inst;
637 	dma_addr_t dma;
638 	u32 val, idx;
639 
640 	/* are we accumulated? */
641 	if (inst->descs) {
642 		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
643 			atomic_inc(&inst->desc_count);
644 			return 0;
645 		}
646 		idx  = atomic_inc_return(&inst->desc_head);
647 		idx &= ACC_DESCS_MASK;
648 		val = inst->descs[idx];
649 	} else {
650 		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
651 		if (unlikely(!val))
652 			return 0;
653 	}
654 
655 	dma = val & DESC_PTR_MASK;
656 	if (size)
657 		*size = ((val & DESC_SIZE_MASK) + 1) * 16;
658 
659 	atomic_inc(&qh->stats.pops);
660 	return dma;
661 }
662 EXPORT_SYMBOL_GPL(knav_queue_pop);
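/*
 * Usage sketch (illustrative only, not part of the driver): a push/pop
 * round trip. 'dma' and 'dma_sz' would typically come from
 * knav_pool_desc_map(); the queue handles are hypothetical.
 *
 *	knav_queue_push(tx_q, dma, dma_sz, 0);
 *	...
 *	dma = knav_queue_pop(done_q, &dma_sz);
 *	if (!dma)
 *		return;		(a return of 0 means the queue was empty)
 */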
663 
664 /* carve out descriptors and push into queue */
665 static void kdesc_fill_pool(struct knav_pool *pool)
666 {
667 	struct knav_region *region;
668 	int i;
669 
670 	region = pool->region;
671 	pool->desc_size = region->desc_size;
672 	for (i = 0; i < pool->num_desc; i++) {
673 		int index = pool->region_offset + i;
674 		dma_addr_t dma_addr;
675 		unsigned dma_size;
676 		dma_addr = region->dma_start + (region->desc_size * index);
677 		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
678 		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
679 					   DMA_TO_DEVICE);
680 		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
681 	}
682 }
683 
684 /* pop out descriptors and close the queue */
685 static void kdesc_empty_pool(struct knav_pool *pool)
686 {
687 	dma_addr_t dma;
688 	unsigned size;
689 	void *desc;
690 	int i;
691 
692 	if (!pool->queue)
693 		return;
694 
695 	for (i = 0;; i++) {
696 		dma = knav_queue_pop(pool->queue, &size);
697 		if (!dma)
698 			break;
699 		desc = knav_pool_desc_dma_to_virt(pool, dma);
700 		if (!desc) {
701 			dev_dbg(pool->kdev->dev,
702 				"couldn't unmap desc, continuing\n");
703 			continue;
704 		}
705 	}
706 	WARN_ON(i != pool->num_desc);
707 	knav_queue_close(pool->queue);
708 }
709 
710 
711 /* Get the DMA address of a descriptor */
712 dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
713 {
714 	struct knav_pool *pool = ph;
715 	return pool->region->dma_start + (virt - pool->region->virt_start);
716 }
717 EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
718 
719 void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
720 {
721 	struct knav_pool *pool = ph;
722 	return pool->region->virt_start + (dma - pool->region->dma_start);
723 }
724 EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
725 
726 /**
727  * knav_pool_create()	- Create a pool of descriptors
728  * @name		- name to give the pool handle
729  * @num_desc		- numbers of descriptors in the pool
730  * @region_id		- QMSS region id from which the descriptors are to be
731  *			  allocated.
732  *
733  * Returns a pool handle on success.
734  * Use IS_ERR_OR_NULL() to identify error values on return.
735  */
736 void *knav_pool_create(const char *name,
737 					int num_desc, int region_id)
738 {
739 	struct knav_region *reg_itr, *region = NULL;
740 	struct knav_pool *pool, *pi;
741 	struct list_head *node;
742 	unsigned last_offset;
743 	bool slot_found;
744 	int ret;
745 
746 	if (!kdev)
747 		return ERR_PTR(-EPROBE_DEFER);
748 
749 	if (!kdev->dev)
750 		return ERR_PTR(-ENODEV);
751 
752 	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
753 	if (!pool) {
754 		dev_err(kdev->dev, "out of memory allocating pool\n");
755 		return ERR_PTR(-ENOMEM);
756 	}
757 
758 	for_each_region(kdev, reg_itr) {
759 		if (reg_itr->id != region_id)
760 			continue;
761 		region = reg_itr;
762 		break;
763 	}
764 
765 	if (!region) {
766 		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
767 		ret = -EINVAL;
768 		goto err;
769 	}
770 
771 	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
772 	if (IS_ERR_OR_NULL(pool->queue)) {
773 		dev_err(kdev->dev,
774 			"failed to open queue for pool(%s), error %ld\n",
775 			name, PTR_ERR(pool->queue));
776 		ret = PTR_ERR(pool->queue);
777 		goto err;
778 	}
779 
780 	pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
781 	pool->kdev = kdev;
782 	pool->dev = kdev->dev;
783 
784 	mutex_lock(&knav_dev_lock);
785 
786 	if (num_desc > (region->num_desc - region->used_desc)) {
787 		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
788 			region_id, name);
789 		ret = -ENOMEM;
790 		goto err_unlock;
791 	}
792 
793 	/* Region maintains a sorted (by region offset) list of pools;
794 	 * use the first free slot that is large enough to accommodate
795 	 * the request.
796 	 */
797 	last_offset = 0;
798 	slot_found = false;
799 	node = &region->pools;
800 	list_for_each_entry(pi, &region->pools, region_inst) {
801 		if ((pi->region_offset - last_offset) >= num_desc) {
802 			slot_found = true;
803 			break;
804 		}
805 		last_offset = pi->region_offset + pi->num_desc;
806 	}
807 	node = &pi->region_inst;
808 
809 	if (slot_found) {
810 		pool->region = region;
811 		pool->num_desc = num_desc;
812 		pool->region_offset = last_offset;
813 		region->used_desc += num_desc;
814 		list_add_tail(&pool->list, &kdev->pools);
815 		list_add_tail(&pool->region_inst, node);
816 	} else {
817 		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
818 			name, region_id);
819 		ret = -ENOMEM;
820 		goto err_unlock;
821 	}
822 
823 	mutex_unlock(&knav_dev_lock);
824 	kdesc_fill_pool(pool);
825 	return pool;
826 
827 err_unlock:
828 	mutex_unlock(&knav_dev_lock);
829 err:
830 	kfree(pool->name);
831 	devm_kfree(kdev->dev, pool);
832 	return ERR_PTR(ret);
833 }
834 EXPORT_SYMBOL_GPL(knav_pool_create);
835 
836 /**
837  * knav_pool_destroy()	- Free a pool of descriptors
838  * @pool		- pool handle
839  */
840 void knav_pool_destroy(void *ph)
841 {
842 	struct knav_pool *pool = ph;
843 
844 	if (!pool)
845 		return;
846 
847 	if (!pool->region)
848 		return;
849 
850 	kdesc_empty_pool(pool);
851 	mutex_lock(&knav_dev_lock);
852 
853 	pool->region->used_desc -= pool->num_desc;
854 	list_del(&pool->region_inst);
855 	list_del(&pool->list);
856 
857 	mutex_unlock(&knav_dev_lock);
858 	kfree(pool->name);
859 	devm_kfree(kdev->dev, pool);
860 }
861 EXPORT_SYMBOL_GPL(knav_pool_destroy);
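/*
 * Usage sketch (illustrative only, not part of the driver): typical pool
 * lifecycle in a client driver. The pool name, descriptor count and region
 * id below are hypothetical.
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("my-rx-pool", 1024, 0);
 *	if (IS_ERR_OR_NULL(pool))
 *		return PTR_ERR(pool);
 *	...
 *	knav_pool_destroy(pool);
 */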
862 
863 
864 /**
865  * knav_pool_desc_get()	- Get a descriptor from the pool
866  * @pool			- pool handle
867  *
868  * Returns descriptor from the pool.
869  */
870 void *knav_pool_desc_get(void *ph)
871 {
872 	struct knav_pool *pool = ph;
873 	dma_addr_t dma;
874 	unsigned size;
875 	void *data;
876 
877 	dma = knav_queue_pop(pool->queue, &size);
878 	if (unlikely(!dma))
879 		return ERR_PTR(-ENOMEM);
880 	data = knav_pool_desc_dma_to_virt(pool, dma);
881 	return data;
882 }
883 EXPORT_SYMBOL_GPL(knav_pool_desc_get);
884 
885 /**
886  * knav_pool_desc_put()	- return a descriptor to the pool
887  * @pool			- pool handle
888  */
889 void knav_pool_desc_put(void *ph, void *desc)
890 {
891 	struct knav_pool *pool = ph;
892 	dma_addr_t dma;
893 	dma = knav_pool_desc_virt_to_dma(pool, desc);
894 	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
895 }
896 EXPORT_SYMBOL_GPL(knav_pool_desc_put);
897 
898 /**
899  * knav_pool_desc_map()	- Map descriptor for DMA transfer
900  * @pool			- pool handle
901  * @desc			- address of descriptor to map
902  * @size			- size of descriptor to map
903  * @dma				- DMA address return pointer
904  * @dma_sz			- adjusted return pointer
905  *
906  * Returns 0 on success, errno otherwise.
907  */
908 int knav_pool_desc_map(void *ph, void *desc, unsigned size,
909 					dma_addr_t *dma, unsigned *dma_sz)
910 {
911 	struct knav_pool *pool = ph;
912 	*dma = knav_pool_desc_virt_to_dma(pool, desc);
913 	size = min(size, pool->region->desc_size);
914 	size = ALIGN(size, SMP_CACHE_BYTES);
915 	*dma_sz = size;
916 	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
917 
918 	/* Ensure the descriptor write has reached memory */
919 	__iowmb();
920 
921 	return 0;
922 }
923 EXPORT_SYMBOL_GPL(knav_pool_desc_map);
924 
925 /**
926  * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
927  * @pool			- pool handle
928  * @dma				- DMA address of descriptor to unmap
929  * @dma_sz			- size of descriptor to unmap
930  *
931  * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
932  * error values on return.
933  */
934 void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
935 {
936 	struct knav_pool *pool = ph;
937 	unsigned desc_sz;
938 	void *desc;
939 
940 	desc_sz = min(dma_sz, pool->region->desc_size);
941 	desc = knav_pool_desc_dma_to_virt(pool, dma);
942 	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
943 	prefetch(desc);
944 	return desc;
945 }
946 EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
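/*
 * Usage sketch (illustrative only, not part of the driver): preparing a
 * descriptor for the hardware and recovering it after completion. The
 * descriptor type struct my_desc and the queue handles are hypothetical.
 *
 *	struct my_desc *desc;
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *
 *	desc = knav_pool_desc_get(pool);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	(fill in descriptor fields)
 *	knav_pool_desc_map(pool, desc, sizeof(*desc), &dma, &dma_sz);
 *	knav_queue_push(tx_q, dma, dma_sz, 0);
 *	...
 *	dma = knav_queue_pop(done_q, &dma_sz);
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 */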
947 
948 /**
949  * knav_pool_count()	- Get the number of descriptors in pool.
950  * @pool		- pool handle
951  * Returns number of elements in the pool.
952  */
953 int knav_pool_count(void *ph)
954 {
955 	struct knav_pool *pool = ph;
956 	return knav_queue_get_count(pool->queue);
957 }
958 EXPORT_SYMBOL_GPL(knav_pool_count);
959 
960 static void knav_queue_setup_region(struct knav_device *kdev,
961 					struct knav_region *region)
962 {
963 	unsigned hw_num_desc, hw_desc_size, size;
964 	struct knav_reg_region __iomem  *regs;
965 	struct knav_qmgr_info *qmgr;
966 	struct knav_pool *pool;
967 	int id = region->id;
968 	struct page *page;
969 
970 	/* unused region? */
971 	if (!region->num_desc) {
972 		dev_warn(kdev->dev, "unused region %s\n", region->name);
973 		return;
974 	}
975 
976 	/* get hardware descriptor value */
977 	hw_num_desc = ilog2(region->num_desc - 1) + 1;
978 
979 	/* did we force fit ourselves into nothingness? */
980 	if (region->num_desc < 32) {
981 		region->num_desc = 0;
982 		dev_warn(kdev->dev, "too few descriptors in region %s\n",
983 			 region->name);
984 		return;
985 	}
986 
987 	size = region->num_desc * region->desc_size;
988 	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
989 						GFP_DMA32);
990 	if (!region->virt_start) {
991 		region->num_desc = 0;
992 		dev_err(kdev->dev, "memory alloc failed for region %s\n",
993 			region->name);
994 		return;
995 	}
996 	region->virt_end = region->virt_start + size;
997 	page = virt_to_page(region->virt_start);
998 
999 	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
1000 					 DMA_BIDIRECTIONAL);
1001 	if (dma_mapping_error(kdev->dev, region->dma_start)) {
1002 		dev_err(kdev->dev, "dma map failed for region %s\n",
1003 			region->name);
1004 		goto fail;
1005 	}
1006 	region->dma_end = region->dma_start + size;
1007 
1008 	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
1009 	if (!pool) {
1010 		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
1011 		goto fail;
1012 	}
1013 	pool->num_desc = 0;
1014 	pool->region_offset = region->num_desc;
1015 	list_add(&pool->region_inst, &region->pools);
1016 
1017 	dev_dbg(kdev->dev,
1018 		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
1019 		region->name, id, region->desc_size, region->num_desc,
1020 		region->link_index, &region->dma_start, &region->dma_end,
1021 		region->virt_start, region->virt_end);
1022 
1023 	hw_desc_size = (region->desc_size / 16) - 1;
1024 	hw_num_desc -= 5;
1025 
1026 	for_each_qmgr(kdev, qmgr) {
1027 		regs = qmgr->reg_region + id;
1028 		writel_relaxed((u32)region->dma_start, &regs->base);
1029 		writel_relaxed(region->link_index, &regs->start_index);
1030 		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
1031 			       &regs->size_count);
1032 	}
1033 	return;
1034 
1035 fail:
1036 	if (region->dma_start)
1037 		dma_unmap_page(kdev->dev, region->dma_start, size,
1038 				DMA_BIDIRECTIONAL);
1039 	if (region->virt_start)
1040 		free_pages_exact(region->virt_start, size);
1041 	region->num_desc = 0;
1042 	return;
1043 }
1044 
1045 static const char *knav_queue_find_name(struct device_node *node)
1046 {
1047 	const char *name;
1048 
1049 	if (of_property_read_string(node, "label", &name) < 0)
1050 		name = node->name;
1051 	if (!name)
1052 		name = "unknown";
1053 	return name;
1054 }
1055 
1056 static int knav_queue_setup_regions(struct knav_device *kdev,
1057 					struct device_node *regions)
1058 {
1059 	struct device *dev = kdev->dev;
1060 	struct knav_region *region;
1061 	struct device_node *child;
1062 	u32 temp[2];
1063 	int ret;
1064 
1065 	for_each_child_of_node(regions, child) {
1066 		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
1067 		if (!region) {
1068 			dev_err(dev, "out of memory allocating region\n");
1069 			return -ENOMEM;
1070 		}
1071 
1072 		region->name = knav_queue_find_name(child);
1073 		of_property_read_u32(child, "id", &region->id);
1074 		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
1075 		if (!ret) {
1076 			region->num_desc  = temp[0];
1077 			region->desc_size = temp[1];
1078 		} else {
1079 			dev_err(dev, "invalid region info %s\n", region->name);
1080 			devm_kfree(dev, region);
1081 			continue;
1082 		}
1083 
1084 		if (!of_get_property(child, "link-index", NULL)) {
1085 			dev_err(dev, "No link info for %s\n", region->name);
1086 			devm_kfree(dev, region);
1087 			continue;
1088 		}
1089 		ret = of_property_read_u32(child, "link-index",
1090 					   &region->link_index);
1091 		if (ret) {
1092 			dev_err(dev, "link index not found for %s\n",
1093 				region->name);
1094 			devm_kfree(dev, region);
1095 			continue;
1096 		}
1097 
1098 		INIT_LIST_HEAD(&region->pools);
1099 		list_add_tail(&region->list, &kdev->regions);
1100 	}
1101 	if (list_empty(&kdev->regions)) {
1102 		dev_err(dev, "no valid region information found\n");
1103 		return -ENODEV;
1104 	}
1105 
1106 	/* Next, we run through the regions and set things up */
1107 	for_each_region(kdev, region)
1108 		knav_queue_setup_region(kdev, region);
1109 
1110 	return 0;
1111 }
1112 
1113 static int knav_get_link_ram(struct knav_device *kdev,
1114 				       const char *name,
1115 				       struct knav_link_ram_block *block)
1116 {
1117 	struct platform_device *pdev = to_platform_device(kdev->dev);
1118 	struct device_node *node = pdev->dev.of_node;
1119 	u32 temp[2];
1120 
1121 	/*
1122 	 * Note: link ram resources are specified in "entry" sized units. In
1123 	 * reality, although entries are ~40bits in hardware, we treat them as
1124 	 * 64-bit entities here.
1125 	 *
1126 	 * For example, to specify the internal link ram for Keystone-I class
1127 	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
1128 	 *
1129 	 * This gets a bit weird when other link rams are used.  For example,
1130 	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
1131 	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
1132 	 * which accounts for 64-bits per entry, for 16K entries.
1133 	 */
1134 	if (!of_property_read_u32_array(node, name , temp, 2)) {
1135 		if (temp[0]) {
1136 			/*
1137 			 * queue_base specified => using internal or on-chip
1138 			 * link ram. WARNING: we do not "reserve" this block
1139 			 */
1140 			block->dma = (dma_addr_t)temp[0];
1141 			block->virt = NULL;
1142 			block->size = temp[1];
1143 		} else {
1144 			block->size = temp[1];
1145 			/* queue_base not specified => allocate requested size */
1146 			block->virt = dmam_alloc_coherent(kdev->dev,
1147 						  8 * block->size, &block->dma,
1148 						  GFP_KERNEL);
1149 			if (!block->virt) {
1150 				dev_err(kdev->dev, "failed to alloc linkram\n");
1151 				return -ENOMEM;
1152 			}
1153 		}
1154 	} else {
1155 		return -ENODEV;
1156 	}
1157 	return 0;
1158 }
1159 
1160 static int knav_queue_setup_link_ram(struct knav_device *kdev)
1161 {
1162 	struct knav_link_ram_block *block;
1163 	struct knav_qmgr_info *qmgr;
1164 
1165 	for_each_qmgr(kdev, qmgr) {
1166 		block = &kdev->link_rams[0];
1167 		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
1168 			&block->dma, block->virt, block->size);
1169 		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
1170 		writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);
1171 
1172 		block++;
1173 		if (!block->size)
1174 			continue;
1175 
1176 		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
1177 			&block->dma, block->virt, block->size);
1178 		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
1179 	}
1180 
1181 	return 0;
1182 }
1183 
1184 static int knav_setup_queue_range(struct knav_device *kdev,
1185 					struct device_node *node)
1186 {
1187 	struct device *dev = kdev->dev;
1188 	struct knav_range_info *range;
1189 	struct knav_qmgr_info *qmgr;
1190 	u32 temp[2], start, end, id, index;
1191 	int ret, i;
1192 
1193 	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
1194 	if (!range) {
1195 		dev_err(dev, "out of memory allocating range\n");
1196 		return -ENOMEM;
1197 	}
1198 
1199 	range->kdev = kdev;
1200 	range->name = knav_queue_find_name(node);
1201 	ret = of_property_read_u32_array(node, "qrange", temp, 2);
1202 	if (!ret) {
1203 		range->queue_base = temp[0] - kdev->base_id;
1204 		range->num_queues = temp[1];
1205 	} else {
1206 		dev_err(dev, "invalid queue range %s\n", range->name);
1207 		devm_kfree(dev, range);
1208 		return -EINVAL;
1209 	}
1210 
1211 	for (i = 0; i < RANGE_MAX_IRQS; i++) {
1212 		struct of_phandle_args oirq;
1213 
1214 		if (of_irq_parse_one(node, i, &oirq))
1215 			break;
1216 
1217 		range->irqs[i].irq = irq_create_of_mapping(&oirq);
1218 		if (range->irqs[i].irq == IRQ_NONE)
1219 			break;
1220 
1221 		range->num_irqs++;
1222 
1223 		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
1224 			unsigned long mask;
1225 			int bit;
1226 
1227 			range->irqs[i].cpu_mask = devm_kzalloc(dev,
1228 							       cpumask_size(), GFP_KERNEL);
1229 			if (!range->irqs[i].cpu_mask)
1230 				return -ENOMEM;
1231 
1232 			mask = (oirq.args[2] & 0x0000ff00) >> 8;
1233 			for_each_set_bit(bit, &mask, BITS_PER_LONG)
1234 				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
1235 		}
1236 	}
1237 
1238 	range->num_irqs = min(range->num_irqs, range->num_queues);
1239 	if (range->num_irqs)
1240 		range->flags |= RANGE_HAS_IRQ;
1241 
1242 	if (of_get_property(node, "qalloc-by-id", NULL))
1243 		range->flags |= RANGE_RESERVED;
1244 
1245 	if (of_get_property(node, "accumulator", NULL)) {
1246 		ret = knav_init_acc_range(kdev, node, range);
1247 		if (ret < 0) {
1248 			devm_kfree(dev, range);
1249 			return ret;
1250 		}
1251 	} else {
1252 		range->ops = &knav_gp_range_ops;
1253 	}
1254 
1255 	/* set threshold to 1, and flush out the queues */
1256 	for_each_qmgr(kdev, qmgr) {
1257 		start = max(qmgr->start_queue, range->queue_base);
1258 		end   = min(qmgr->start_queue + qmgr->num_queues,
1259 			    range->queue_base + range->num_queues);
1260 		for (id = start; id < end; id++) {
1261 			index = id - qmgr->start_queue;
1262 			writel_relaxed(THRESH_GTE | 1,
1263 				       &qmgr->reg_peek[index].ptr_size_thresh);
1264 			writel_relaxed(0,
1265 				       &qmgr->reg_push[index].ptr_size_thresh);
1266 		}
1267 	}
1268 
1269 	list_add_tail(&range->list, &kdev->queue_ranges);
1270 	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
1271 		range->name, range->queue_base,
1272 		range->queue_base + range->num_queues - 1,
1273 		range->num_irqs,
1274 		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
1275 		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
1276 		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
1277 	kdev->num_queues_in_use += range->num_queues;
1278 	return 0;
1279 }
1280 
1281 static int knav_setup_queue_pools(struct knav_device *kdev,
1282 				   struct device_node *queue_pools)
1283 {
1284 	struct device_node *type, *range;
1285 	int ret;
1286 
1287 	for_each_child_of_node(queue_pools, type) {
1288 		for_each_child_of_node(type, range) {
1289 			ret = knav_setup_queue_range(kdev, range);
1290 			/* return value ignored, we init the rest... */
1291 		}
1292 	}
1293 
1294 	/* ... and barf if they all failed! */
1295 	if (list_empty(&kdev->queue_ranges)) {
1296 		dev_err(kdev->dev, "no valid queue range found\n");
1297 		return -ENODEV;
1298 	}
1299 	return 0;
1300 }
1301 
1302 static void knav_free_queue_range(struct knav_device *kdev,
1303 				  struct knav_range_info *range)
1304 {
1305 	if (range->ops && range->ops->free_range)
1306 		range->ops->free_range(range);
1307 	list_del(&range->list);
1308 	devm_kfree(kdev->dev, range);
1309 }
1310 
1311 static void knav_free_queue_ranges(struct knav_device *kdev)
1312 {
1313 	struct knav_range_info *range;
1314 
1315 	for (;;) {
1316 		range = first_queue_range(kdev);
1317 		if (!range)
1318 			break;
1319 		knav_free_queue_range(kdev, range);
1320 	}
1321 }
1322 
1323 static void knav_queue_free_regions(struct knav_device *kdev)
1324 {
1325 	struct knav_region *region;
1326 	struct knav_pool *pool, *tmp;
1327 	unsigned size;
1328 
1329 	for (;;) {
1330 		region = first_region(kdev);
1331 		if (!region)
1332 			break;
1333 		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1334 			knav_pool_destroy(pool);
1335 
1336 		size = region->virt_end - region->virt_start;
1337 		if (size)
1338 			free_pages_exact(region->virt_start, size);
1339 		list_del(&region->list);
1340 		devm_kfree(kdev->dev, region);
1341 	}
1342 }
1343 
1344 static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
1345 					struct device_node *node, int index)
1346 {
1347 	struct resource res;
1348 	void __iomem *regs;
1349 	int ret;
1350 
1351 	ret = of_address_to_resource(node, index, &res);
1352 	if (ret) {
1353 		dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
1354 			node->name, index);
1355 		return ERR_PTR(ret);
1356 	}
1357 
1358 	regs = devm_ioremap_resource(kdev->dev, &res);
1359 	if (IS_ERR(regs))
1360 		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
1361 			index, node->name);
1362 	return regs;
1363 }
1364 
1365 static int knav_queue_init_qmgrs(struct knav_device *kdev,
1366 					struct device_node *qmgrs)
1367 {
1368 	struct device *dev = kdev->dev;
1369 	struct knav_qmgr_info *qmgr;
1370 	struct device_node *child;
1371 	u32 temp[2];
1372 	int ret;
1373 
1374 	for_each_child_of_node(qmgrs, child) {
1375 		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
1376 		if (!qmgr) {
1377 			dev_err(dev, "out of memory allocating qmgr\n");
1378 			return -ENOMEM;
1379 		}
1380 
1381 		ret = of_property_read_u32_array(child, "managed-queues",
1382 						 temp, 2);
1383 		if (!ret) {
1384 			qmgr->start_queue = temp[0];
1385 			qmgr->num_queues = temp[1];
1386 		} else {
1387 			dev_err(dev, "invalid qmgr queue range\n");
1388 			devm_kfree(dev, qmgr);
1389 			continue;
1390 		}
1391 
1392 		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
1393 			 qmgr->start_queue, qmgr->num_queues);
1394 
1395 		qmgr->reg_peek =
1396 			knav_queue_map_reg(kdev, child,
1397 					   KNAV_QUEUE_PEEK_REG_INDEX);
1398 		qmgr->reg_status =
1399 			knav_queue_map_reg(kdev, child,
1400 					   KNAV_QUEUE_STATUS_REG_INDEX);
1401 		qmgr->reg_config =
1402 			knav_queue_map_reg(kdev, child,
1403 					   KNAV_QUEUE_CONFIG_REG_INDEX);
1404 		qmgr->reg_region =
1405 			knav_queue_map_reg(kdev, child,
1406 					   KNAV_QUEUE_REGION_REG_INDEX);
1407 		qmgr->reg_push =
1408 			knav_queue_map_reg(kdev, child,
1409 					   KNAV_QUEUE_PUSH_REG_INDEX);
1410 		qmgr->reg_pop =
1411 			knav_queue_map_reg(kdev, child,
1412 					   KNAV_QUEUE_POP_REG_INDEX);
1413 
1414 		if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) ||
1415 		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
1416 		    IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) {
1417 			dev_err(dev, "failed to map qmgr regs\n");
1418 			if (!IS_ERR(qmgr->reg_peek))
1419 				devm_iounmap(dev, qmgr->reg_peek);
1420 			if (!IS_ERR(qmgr->reg_status))
1421 				devm_iounmap(dev, qmgr->reg_status);
1422 			if (!IS_ERR(qmgr->reg_config))
1423 				devm_iounmap(dev, qmgr->reg_config);
1424 			if (!IS_ERR(qmgr->reg_region))
1425 				devm_iounmap(dev, qmgr->reg_region);
1426 			if (!IS_ERR(qmgr->reg_push))
1427 				devm_iounmap(dev, qmgr->reg_push);
1428 			if (!IS_ERR(qmgr->reg_pop))
1429 				devm_iounmap(dev, qmgr->reg_pop);
1430 			devm_kfree(dev, qmgr);
1431 			continue;
1432 		}
1433 
1434 		list_add_tail(&qmgr->list, &kdev->qmgrs);
1435 		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1436 			 qmgr->start_queue, qmgr->num_queues,
1437 			 qmgr->reg_peek, qmgr->reg_status,
1438 			 qmgr->reg_config, qmgr->reg_region,
1439 			 qmgr->reg_push, qmgr->reg_pop);
1440 	}
1441 	return 0;
1442 }
1443 
1444 static int knav_queue_init_pdsps(struct knav_device *kdev,
1445 					struct device_node *pdsps)
1446 {
1447 	struct device *dev = kdev->dev;
1448 	struct knav_pdsp_info *pdsp;
1449 	struct device_node *child;
1450 
1451 	for_each_child_of_node(pdsps, child) {
1452 		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
1453 		if (!pdsp) {
1454 			dev_err(dev, "out of memory allocating pdsp\n");
1455 			return -ENOMEM;
1456 		}
1457 		pdsp->name = knav_queue_find_name(child);
1458 		pdsp->iram =
1459 			knav_queue_map_reg(kdev, child,
1460 					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
1461 		pdsp->regs =
1462 			knav_queue_map_reg(kdev, child,
1463 					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
1464 		pdsp->intd =
1465 			knav_queue_map_reg(kdev, child,
1466 					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
1467 		pdsp->command =
1468 			knav_queue_map_reg(kdev, child,
1469 					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);
1470 
1471 		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
1472 		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
1473 			dev_err(dev, "failed to map pdsp %s regs\n",
1474 				pdsp->name);
1475 			if (!IS_ERR(pdsp->command))
1476 				devm_iounmap(dev, pdsp->command);
1477 			if (!IS_ERR(pdsp->iram))
1478 				devm_iounmap(dev, pdsp->iram);
1479 			if (!IS_ERR(pdsp->regs))
1480 				devm_iounmap(dev, pdsp->regs);
1481 			if (!IS_ERR(pdsp->intd))
1482 				devm_iounmap(dev, pdsp->intd);
1483 			devm_kfree(dev, pdsp);
1484 			continue;
1485 		}
1486 		of_property_read_u32(child, "id", &pdsp->id);
1487 		list_add_tail(&pdsp->list, &kdev->pdsps);
1488 		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
1489 			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
1490 			pdsp->intd);
1491 	}
1492 	return 0;
1493 }
1494 
1495 static int knav_queue_stop_pdsp(struct knav_device *kdev,
1496 			  struct knav_pdsp_info *pdsp)
1497 {
1498 	u32 val, timeout = 1000;
1499 	int ret;
1500 
1501 	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
1502 	writel_relaxed(val, &pdsp->regs->control);
1503 	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
1504 					PDSP_CTRL_RUNNING);
1505 	if (ret < 0) {
1506 		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
1507 		return ret;
1508 	}
1509 	pdsp->loaded = false;
1510 	pdsp->started = false;
1511 	return 0;
1512 }
1513 
1514 static int knav_queue_load_pdsp(struct knav_device *kdev,
1515 			  struct knav_pdsp_info *pdsp)
1516 {
1517 	int i, ret, fwlen;
1518 	const struct firmware *fw;
1519 	bool found = false;
1520 	u32 *fwdata;
1521 
1522 	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1523 		if (knav_acc_firmwares[i]) {
1524 			ret = request_firmware_direct(&fw,
1525 						      knav_acc_firmwares[i],
1526 						      kdev->dev);
1527 			if (!ret) {
1528 				found = true;
1529 				break;
1530 			}
1531 		}
1532 	}
1533 
1534 	if (!found) {
1535 		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
1536 		return -ENODEV;
1537 	}
1538 
1539 	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
1540 		 knav_acc_firmwares[i]);
1541 
1542 	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
1543 	/* download the firmware */
1544 	fwdata = (u32 *)fw->data;
1545 	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
1546 	for (i = 0; i < fwlen; i++)
1547 		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
1548 
1549 	release_firmware(fw);
1550 	return 0;
1551 }
1552 
1553 static int knav_queue_start_pdsp(struct knav_device *kdev,
1554 			   struct knav_pdsp_info *pdsp)
1555 {
1556 	u32 val, timeout = 1000;
1557 	int ret;
1558 
1559 	/* write a command for sync */
1560 	writel_relaxed(0xffffffff, pdsp->command);
1561 	while (readl_relaxed(pdsp->command) != 0xffffffff)
1562 		cpu_relax();
1563 
1564 	/* soft reset the PDSP */
1565 	val  = readl_relaxed(&pdsp->regs->control);
1566 	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
1567 	writel_relaxed(val, &pdsp->regs->control);
1568 
1569 	/* enable pdsp */
1570 	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
1571 	writel_relaxed(val, &pdsp->regs->control);
1572 
1573 	/* wait for command register to clear */
1574 	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
1575 	if (ret < 0) {
1576 		dev_err(kdev->dev,
1577 			"timed out on pdsp %s command register wait\n",
1578 			pdsp->name);
1579 		return ret;
1580 	}
1581 	return 0;
1582 }
1583 
1584 static void knav_queue_stop_pdsps(struct knav_device *kdev)
1585 {
1586 	struct knav_pdsp_info *pdsp;
1587 
1588 	/* disable all pdsps */
1589 	for_each_pdsp(kdev, pdsp)
1590 		knav_queue_stop_pdsp(kdev, pdsp);
1591 }
1592 
1593 static int knav_queue_start_pdsps(struct knav_device *kdev)
1594 {
1595 	struct knav_pdsp_info *pdsp;
1596 	int ret;
1597 
1598 	knav_queue_stop_pdsps(kdev);
1599 	/* Now load them all. We return success even if a pdsp is not
1600 	 * loaded, as the accumulator channels are optional and depend on
1601 	 * firmware being available in the system. We set the loaded and
1602 	 * started flags and, when initializing the acc range, init the
1603 	 * range only if its pdsp has been started.
1604 	 */
1605 	for_each_pdsp(kdev, pdsp) {
1606 		ret = knav_queue_load_pdsp(kdev, pdsp);
1607 		if (!ret)
1608 			pdsp->loaded = true;
1609 	}
1610 
1611 	for_each_pdsp(kdev, pdsp) {
1612 		if (pdsp->loaded) {
1613 			ret = knav_queue_start_pdsp(kdev, pdsp);
1614 			if (!ret)
1615 				pdsp->started = true;
1616 		}
1617 	}
1618 	return 0;
1619 }
1620 
1621 static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
1622 {
1623 	struct knav_qmgr_info *qmgr;
1624 
1625 	for_each_qmgr(kdev, qmgr) {
1626 		if ((id >= qmgr->start_queue) &&
1627 		    (id < qmgr->start_queue + qmgr->num_queues))
1628 			return qmgr;
1629 	}
1630 	return NULL;
1631 }
1632 
1633 static int knav_queue_init_queue(struct knav_device *kdev,
1634 					struct knav_range_info *range,
1635 					struct knav_queue_inst *inst,
1636 					unsigned id)
1637 {
1638 	char irq_name[KNAV_NAME_SIZE];
1639 	inst->qmgr = knav_find_qmgr(id);
1640 	if (!inst->qmgr)
1641 		return -1;
1642 
1643 	INIT_LIST_HEAD(&inst->handles);
1644 	inst->kdev = kdev;
1645 	inst->range = range;
1646 	inst->irq_num = -1;
1647 	inst->id = id;
1648 	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
1649 	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
1650 
1651 	if (range->ops && range->ops->init_queue)
1652 		return range->ops->init_queue(range, inst);
1653 	else
1654 		return 0;
1655 }
1656 
1657 static int knav_queue_init_queues(struct knav_device *kdev)
1658 {
1659 	struct knav_range_info *range;
1660 	int size, id, base_idx;
1661 	int idx = 0, ret = 0;
1662 
1663 	/* how much do we need for instance data? */
1664 	size = sizeof(struct knav_queue_inst);
1665 
1666 	/* round this up to a power of 2, keep the index to instance
1667 	 * arithmetic fast.
1668 	 */
1669 	kdev->inst_shift = order_base_2(size);
1670 	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1671 	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1672 	if (!kdev->instances)
1673 		return -ENOMEM;
1674 
1675 	for_each_queue_range(kdev, range) {
1676 		if (range->ops && range->ops->init_range)
1677 			range->ops->init_range(range);
1678 		base_idx = idx;
1679 		for (id = range->queue_base;
1680 		     id < range->queue_base + range->num_queues; id++, idx++) {
1681 			ret = knav_queue_init_queue(kdev, range,
1682 					knav_queue_idx_to_inst(kdev, idx), id);
1683 			if (ret < 0)
1684 				return ret;
1685 		}
1686 		range->queue_base_inst =
1687 			knav_queue_idx_to_inst(kdev, base_idx);
1688 	}
1689 	return 0;
1690 }
1691 
1692 static int knav_queue_probe(struct platform_device *pdev)
1693 {
1694 	struct device_node *node = pdev->dev.of_node;
1695 	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
1696 	struct device *dev = &pdev->dev;
1697 	u32 temp[2];
1698 	int ret;
1699 
1700 	if (!node) {
1701 		dev_err(dev, "device tree info unavailable\n");
1702 		return -ENODEV;
1703 	}
1704 
1705 	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
1706 	if (!kdev) {
1707 		dev_err(dev, "memory allocation failed\n");
1708 		return -ENOMEM;
1709 	}
1710 
1711 	platform_set_drvdata(pdev, kdev);
1712 	kdev->dev = dev;
1713 	INIT_LIST_HEAD(&kdev->queue_ranges);
1714 	INIT_LIST_HEAD(&kdev->qmgrs);
1715 	INIT_LIST_HEAD(&kdev->pools);
1716 	INIT_LIST_HEAD(&kdev->regions);
1717 	INIT_LIST_HEAD(&kdev->pdsps);
1718 
1719 	pm_runtime_enable(&pdev->dev);
1720 	ret = pm_runtime_get_sync(&pdev->dev);
1721 	if (ret < 0) {
1722 		dev_err(dev, "Failed to enable QMSS\n");
1723 		return ret;
1724 	}
1725 
1726 	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
1727 		dev_err(dev, "queue-range not specified\n");
1728 		ret = -ENODEV;
1729 		goto err;
1730 	}
1731 	kdev->base_id    = temp[0];
1732 	kdev->num_queues = temp[1];
1733 
1734 	/* Initialize queue managers using device tree configuration */
1735 	qmgrs =  of_get_child_by_name(node, "qmgrs");
1736 	if (!qmgrs) {
1737 		dev_err(dev, "queue manager info not specified\n");
1738 		ret = -ENODEV;
1739 		goto err;
1740 	}
1741 	ret = knav_queue_init_qmgrs(kdev, qmgrs);
1742 	of_node_put(qmgrs);
1743 	if (ret)
1744 		goto err;
1745 
1746 	/* get pdsp configuration values from device tree */
1747 	pdsps =  of_get_child_by_name(node, "pdsps");
1748 	if (pdsps) {
1749 		ret = knav_queue_init_pdsps(kdev, pdsps);
1750 		if (ret)
1751 			goto err;
1752 
1753 		ret = knav_queue_start_pdsps(kdev);
1754 		if (ret)
1755 			goto err;
1756 	}
1757 	of_node_put(pdsps);
1758 
1759 	/* get usable queue range values from device tree */
1760 	queue_pools = of_get_child_by_name(node, "queue-pools");
1761 	if (!queue_pools) {
1762 		dev_err(dev, "queue-pools not specified\n");
1763 		ret = -ENODEV;
1764 		goto err;
1765 	}
1766 	ret = knav_setup_queue_pools(kdev, queue_pools);
1767 	of_node_put(queue_pools);
1768 	if (ret)
1769 		goto err;
1770 
1771 	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
1772 	if (ret) {
1773 		dev_err(kdev->dev, "could not setup linking ram\n");
1774 		goto err;
1775 	}
1776 
1777 	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
1778 	if (ret) {
1779 		/*
1780 		 * nothing really, we have one linking ram already, so we just
1781 		 * live within our means
1782 		 */
1783 	}
1784 
1785 	ret = knav_queue_setup_link_ram(kdev);
1786 	if (ret)
1787 		goto err;
1788 
1789 	regions =  of_get_child_by_name(node, "descriptor-regions");
1790 	if (!regions) {
1791 		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
1792 		goto err;
1793 	}
1794 	ret = knav_queue_setup_regions(kdev, regions);
1795 	of_node_put(regions);
1796 	if (ret)
1797 		goto err;
1798 
1799 	ret = knav_queue_init_queues(kdev);
1800 	if (ret < 0) {
1801 		dev_err(dev, "hwqueue initialization failed\n");
1802 		goto err;
1803 	}
1804 
1805 	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
1806 			    &knav_queue_debug_ops);
1807 	return 0;
1808 
1809 err:
1810 	knav_queue_stop_pdsps(kdev);
1811 	knav_queue_free_regions(kdev);
1812 	knav_free_queue_ranges(kdev);
1813 	pm_runtime_put_sync(&pdev->dev);
1814 	pm_runtime_disable(&pdev->dev);
1815 	return ret;
1816 }
1817 
1818 static int knav_queue_remove(struct platform_device *pdev)
1819 {
1820 	/* TODO: Free resources */
1821 	pm_runtime_put_sync(&pdev->dev);
1822 	pm_runtime_disable(&pdev->dev);
1823 	return 0;
1824 }
1825 
1826 /* Match table for of_platform binding */
1827 static struct of_device_id keystone_qmss_of_match[] = {
1828 	{ .compatible = "ti,keystone-navigator-qmss", },
1829 	{},
1830 };
1831 MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
1832 
1833 static struct platform_driver keystone_qmss_driver = {
1834 	.probe		= knav_queue_probe,
1835 	.remove		= knav_queue_remove,
1836 	.driver		= {
1837 		.name	= "keystone-navigator-qmss",
1838 		.of_match_table = keystone_qmss_of_match,
1839 	},
1840 };
1841 module_platform_driver(keystone_qmss_driver);
1842 
1843 MODULE_LICENSE("GPL v2");
1844 MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
1845 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
1846 MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
1847