/*
 * CAAM/SEC 4.x transport/backend driver
 * JobR backend functionality
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */

#include <linux/of_irq.h>
#include <linux/of_address.h>

#include "compat.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"

struct jr_driver_data {
	/* List of Physical JobR's with the Driver */
	struct list_head	jr_list;
	spinlock_t		jr_alloc_lock;	/* jr_list lock */
} ____cacheline_aligned;

static struct jr_driver_data driver_data;

static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* initiate flush (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* initiate reset */
	timeout = 100000;
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	return 0;
}

/*
 * Shutdown JobR independent of platform property code
 */
int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	dma_addr_t inpbusaddr, outbusaddr;
	int ret;

	ret = caam_reset_hw_jr(dev);

	tasklet_kill(&jrp->irqtask);

	/* Release interrupt */
	free_irq(jrp->irq, dev);

	/* Free rings */
	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
			  jrp->inpring, inpbusaddr);
	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
			  jrp->outring, outbusaddr);
	kfree(jrp->entinfo);

	return ret;
}

static int caam_jr_remove(struct platform_device *pdev)
{
	int ret;
	struct device *jrdev;
	struct caam_drv_private_jr *jrpriv;

	jrdev = &pdev->dev;
	jrpriv = dev_get_drvdata(jrdev);

	/*
	 * Return -EBUSY if the job ring is still allocated to a user.
	 */
	if (atomic_read(&jrpriv->tfm_count)) {
		dev_err(jrdev, "Device is busy\n");
		return -EBUSY;
	}

	/* Remove the node from the Physical JobR list maintained by driver */
	spin_lock(&driver_data.jr_alloc_lock);
	list_del(&jrpriv->list_node);
	spin_unlock(&driver_data.jr_alloc_lock);

	/* Release ring */
	ret = caam_jr_shutdown(jrdev);
	if (ret)
		dev_err(jrdev, "Failed to shut down job ring\n");
	irq_dispose_mapping(jrpriv->irq);

	return ret;
}

/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * If we see a JobR error, we have more development work to do.
	 * Flag a bug now, but we really need to shut down and
	 * restart the queue (and fix code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask);
	preempt_enable();

	return IRQ_HANDLED;
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;

	while (rd_reg32(&jrp->rregs->outring_used)) {

		head = ACCESS_ONCE(jrp->head);

		spin_lock(&jrp->outlock);

		sw_idx = tail = jrp->tail;
		hw_idx = jrp->out_ring_read_index;

		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			if (jrp->outring[hw_idx].desc ==
			    jrp->entinfo[sw_idx].desc_addr_dma)
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = jrp->outring[hw_idx].jrstatus;

		/*
		 * Make sure all information from the job has been obtained
		 * before telling CAAM that the job has been removed from the
		 * output ring.
		 */
		mb();

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail.  Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		spin_unlock(&jrp->outlock);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);
	}

	/* reenable / unmask IRQs */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}
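
/*
 * Worked example of the out-of-order tail handling above (illustrative,
 * with a hypothetical JOBR_DEPTH of 8): suppose jobs were enqueued at
 * software slots 0, 1 and 2 (tail == 0, head == 3). If job 1 completes
 * first, entinfo[1].desc_addr_dma is zeroed but sw_idx != tail, so tail
 * stays at 0. When job 0 later completes, sw_idx == tail, and the
 * do/while walks forward past slot 0 and the already-zeroed slot 1,
 * leaving tail == 2, so both slots are reclaimed in one pass.
 */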

/**
 * caam_jr_alloc() - Allocate a job ring for someone to use as needed.
 *
 * returns:	pointer to the job ring device with the lowest transform
 *		count if successful, or ERR_PTR(-ENODEV) if no job ring
 *		is available.
 **/
struct device *caam_jr_alloc(void)
{
	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
	struct device *dev = ERR_PTR(-ENODEV);
	int min_tfm_cnt	= INT_MAX;
	int tfm_cnt;

	spin_lock(&driver_data.jr_alloc_lock);

	if (list_empty(&driver_data.jr_list)) {
		spin_unlock(&driver_data.jr_alloc_lock);
		return ERR_PTR(-ENODEV);
	}

	list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
		tfm_cnt = atomic_read(&jrpriv->tfm_count);
		if (tfm_cnt < min_tfm_cnt) {
			min_tfm_cnt = tfm_cnt;
			min_jrpriv = jrpriv;
		}
		if (!min_tfm_cnt)
			break;
	}

	if (min_jrpriv) {
		atomic_inc(&min_jrpriv->tfm_count);
		dev = min_jrpriv->dev;
	}
	spin_unlock(&driver_data.jr_alloc_lock);

	return dev;
}
EXPORT_SYMBOL(caam_jr_alloc);

/**
 * caam_jr_free() - Free the Job Ring
 * @rdev:	points to the dev that identifies the Job ring to
 *		be released.
 **/
void caam_jr_free(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);

	atomic_dec(&jrpriv->tfm_count);
}
EXPORT_SYMBOL(caam_jr_free);
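
/*
 * Typical usage of the allocator pair above (a minimal sketch; the
 * variable names and error handling are illustrative only):
 *
 *	struct device *jrdev = caam_jr_alloc();
 *
 *	if (IS_ERR(jrdev))
 *		return PTR_ERR(jrdev);
 *	...submit jobs against jrdev via caam_jr_enqueue()...
 *	caam_jr_free(jrdev);
 *
 * Each caam_jr_alloc() increments the ring's tfm_count, so every user
 * must balance it with caam_jr_free(), or caam_jr_remove() will keep
 * returning -EBUSY.
 */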

/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been assigned prior by caam_jr_alloc().
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *        where:
 *        @dev:    contains the job ring device that processed this
 *                 response.
 *        @desc:   descriptor that initiated the request, same as
 *                 "desc" passed to caam_jr_enqueue().
 *        @status: untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file.
 *        @areq:   optional pointer to an argument passed with the
 *                 original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = ACCESS_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

	/*
	 * Guarantee that the descriptor's DMA address has been written to
	 * the next slot in the ring before the write index is updated, since
	 * other cores may update this index independently.
	 */
	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	/*
	 * Ensure that all job information has been written before
	 * notifying CAAM that a new job was added to the input ring.
	 */
	wmb();

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_bh(&jrp->inplock);

	return 0;
}
EXPORT_SYMBOL(caam_jr_enqueue);
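
/*
 * Example of submitting a job and waiting on its completion (a
 * hypothetical sketch; my_done() and my_req are illustrative names,
 * and the descriptor is assumed to be built elsewhere):
 *
 *	static void my_done(struct device *dev, u32 *desc, u32 status,
 *			    void *areq)
 *	{
 *		struct my_request *req = areq;
 *
 *		if (status)
 *			dev_err(dev, "job failed: 0x%08x\n", status);
 *		req->status = status;
 *		complete(&req->done);
 *	}
 *
 *	ret = caam_jr_enqueue(jrdev, desc, my_done, &my_req);
 *	if (ret == -EBUSY)
 *		...ring full; retry or back off...
 *
 * The callback runs from the caam_jr_dequeue() tasklet, so it must not
 * sleep.
 */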

/*
 * Init JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);

	/* Connect job ring interrupt handler. */
	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
			    dev_name(dev), dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		goto out_kill_deq;
	}

	error = caam_reset_hw_jr(dev);
	if (error)
		goto out_free_irq;

	error = -ENOMEM;
	jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
					  JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
	if (!jrp->inpring)
		goto out_free_irq;

	jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
	if (!jrp->outring)
		goto out_free_inpring;

	jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
	if (!jrp->entinfo)
		goto out_free_outring;

	/* nonzero marks each entry unused; caam_jr_dequeue() treats 0 as done */
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	jrp->inp_ring_write_index = 0;
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->ringsize = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);
	spin_lock_init(&jrp->outlock);

	/* Select interrupt coalescing parameters */
	setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
		  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	return 0;

out_free_outring:
	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
			  jrp->outring, outbusaddr);
out_free_inpring:
	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
			  jrp->inpring, inpbusaddr);
	dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
out_free_irq:
	free_irq(jrp->irq, dev);
out_kill_deq:
	tasklet_kill(&jrp->irqtask);
	return error;
}

/*
 * Probe routine for each detected JobR subsystem.
 */
static int caam_jr_probe(struct platform_device *pdev)
{
	struct device *jrdev;
	struct device_node *nprop;
	struct caam_job_ring __iomem *ctrl;
	struct caam_drv_private_jr *jrpriv;
	static int total_jobrs;
	int error;

	jrdev = &pdev->dev;
	jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
	if (!jrpriv)
		return -ENOMEM;

	dev_set_drvdata(jrdev, jrpriv);

	/* save ring identity relative to detection */
	jrpriv->ridx = total_jobrs++;

	nprop = pdev->dev.of_node;
	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = of_iomap(nprop, 0);
	if (!ctrl) {
		dev_err(jrdev, "of_iomap() failed\n");
		return -ENOMEM;
	}

	jrpriv->rregs = (struct caam_job_ring __force *)ctrl;

	if (sizeof(dma_addr_t) == sizeof(u64)) {
		if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
			dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
		else
			dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
	} else {
		dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
	}

	/* Identify the interrupt */
	jrpriv->irq = irq_of_parse_and_map(nprop, 0);

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		irq_dispose_mapping(jrpriv->irq);
		return error;
	}

	jrpriv->dev = jrdev;
	spin_lock(&driver_data.jr_alloc_lock);
	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
	spin_unlock(&driver_data.jr_alloc_lock);

	atomic_set(&jrpriv->tfm_count, 0);

	return 0;
}

static const struct of_device_id caam_jr_match[] = {
	{
		.compatible = "fsl,sec-v4.0-job-ring",
	},
	{
		.compatible = "fsl,sec4.0-job-ring",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_jr_match);
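
/*
 * An illustrative device tree node this driver would bind against (the
 * unit address, register range and interrupt specifier are hypothetical
 * and SoC-specific; see the fsl,sec-v4.0 binding document for details):
 *
 *	jr@1000 {
 *		compatible = "fsl,sec-v4.0-job-ring";
 *		reg = <0x1000 0x1000>;
 *		interrupts = <88 2 0 0>;
 *	};
 *
 * of_iomap() in caam_jr_probe() maps the "reg" range, and
 * irq_of_parse_and_map() resolves the "interrupts" specifier.
 */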

static struct platform_driver caam_jr_driver = {
	.driver = {
		.name = "caam_jr",
		.of_match_table = caam_jr_match,
	},
	.probe       = caam_jr_probe,
	.remove      = caam_jr_remove,
};

static int __init jr_driver_init(void)
{
	spin_lock_init(&driver_data.jr_alloc_lock);
	INIT_LIST_HEAD(&driver_data.jr_list);
	return platform_driver_register(&caam_jr_driver);
}

static void __exit jr_driver_exit(void)
{
	platform_driver_unregister(&caam_jr_driver);
}

module_init(jr_driver_init);
module_exit(jr_driver_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM JR request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");