• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, see <http://www.gnu.org/licenses/>.
15  */
16 
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 
20 #include <linux/crypto.h>
21 #include <crypto/algapi.h>
22 #include <crypto/aes.h>
23 #include <crypto/sha.h>
24 #include <crypto/aead.h>
25 #include <crypto/authenc.h>
26 #include <crypto/scatterwalk.h>
27 #include <crypto/internal/skcipher.h>
28 
29 #include <linux/init.h>
30 #include <linux/moduleparam.h>
31 #include <linux/types.h>
32 #include <linux/random.h>
33 #include <linux/ioport.h>
34 #include <linux/interrupt.h>
35 #include <linux/fcntl.h>
36 #include <linux/poll.h>
37 #include <linux/proc_fs.h>
38 #include <linux/mutex.h>
39 #include <linux/sysctl.h>
40 #include <linux/fs.h>
41 #include <linux/cdev.h>
42 #include <linux/platform_device.h>
43 #include <linux/mm.h>
44 #include <linux/delay.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/dmapool.h>
47 #include <linux/list.h>
48 #include <linux/slab.h>
49 #include <linux/spinlock.h>
50 #include <linux/pm.h>
51 
52 /* cache.h required for L1_CACHE_ALIGN() and cache_line_size() */
53 #include <linux/cache.h>
54 #include <linux/io.h>
55 #include <linux/uaccess.h>
56 #include <linux/pagemap.h>
57 #include <linux/sched.h>
58 #include <linux/random.h>
59 #include <linux/of.h>
60 #include <linux/clk.h>
61 #include <linux/of_address.h>
62 
63 #include "ssi_config.h"
64 #include "ssi_driver.h"
65 #include "ssi_request_mgr.h"
66 #include "ssi_buffer_mgr.h"
67 #include "ssi_sysfs.h"
68 #include "ssi_cipher.h"
69 #include "ssi_aead.h"
70 #include "ssi_hash.h"
71 #include "ssi_ivgen.h"
72 #include "ssi_sram_mgr.h"
73 #include "ssi_pm.h"
74 #include "ssi_fips.h"
75 
76 #ifdef DX_DUMP_BYTES
/*
 * dump_byte_array() - log a byte array as hex, wrapped into ~80-char lines.
 * @name:      label printed before the first byte
 * @the_array: bytes to dump; NULL is rejected with an error log
 * @size:      number of bytes in @the_array
 *
 * Debug-only helper (compiled under DX_DUMP_BYTES). Output goes through
 * SSI_LOG_DEBUG, one buffered line at a time.
 */
void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
{
	/* index is unsigned long to match @size; the original 'int i' could
	 * overflow (UB) and mis-compare against an unsigned size > INT_MAX
	 */
	unsigned long i;
	int line_offset = 0, ret = 0;
	const u8 *cur_byte;
	char line_buf[80];

	if (!the_array) {
		SSI_LOG_ERR("cannot dump array - NULL pointer\n");
		return;
	}

	/* Line prefix: "<name>[<size>]: " */
	ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ", name, size);
	if (ret < 0) {
		SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
		return;
	}
	line_offset = ret;
	/* cast sizeof() to int so the comparison is signed-vs-signed;
	 * line_offset is always small (< 81) so the cast is safe
	 */
	for (i = 0, cur_byte = the_array;
	     (i < size) && (line_offset < (int)sizeof(line_buf)); i++, cur_byte++) {
		ret = snprintf(line_buf + line_offset,
			       sizeof(line_buf) - line_offset,
			       "0x%02X ", *cur_byte);
		if (ret < 0) {
			SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
			return;
		}
		line_offset += ret;
		if (line_offset > 75) { /* Cut before line end */
			SSI_LOG_DEBUG("%s\n", line_buf);
			line_offset = 0;
		}
	}

	if (line_offset > 0) /* Dump remaining line */
		SSI_LOG_DEBUG("%s\n", line_buf);
}
113 #endif
114 
/*
 * cc_isr() - top-half interrupt handler for the CryptoCell device.
 * @irq:    interrupt number (unused beyond the standard signature)
 * @dev_id: driver context registered via request_irq() (struct ssi_drvdata *)
 *
 * Reads IRR, acknowledges all pending causes, then dispatches: request
 * completion, TEE FIPS notification (CONFIG_CRYPTO_FIPS only) and AXI
 * errors. Returns IRQ_NONE when IRR is empty (shared line), IRQ_HANDLED
 * otherwise. The register access order (read IRR -> read IMR -> write ICR)
 * must be preserved: causes are cleared before event processing starts.
 */
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
	void __iomem *cc_base = drvdata->cc_base;
	u32 irr;
	u32 imr;

	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */

	/* read the interrupt status */
	irr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
	SSI_LOG_DEBUG("Got IRR=0x%08X\n", irr);
	if (unlikely(irr == 0)) { /* Probably shared interrupt line */
		SSI_LOG_ERR("Got interrupt with empty IRR\n");
		return IRQ_NONE;
	}
	imr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR));

	/* clear interrupt - must be before processing events */
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), irr);

	/* Stash raw cause bits for the deferred handlers */
	drvdata->irq = irr;
	/* Completion interrupt - most probable */
	if (likely((irr & SSI_COMP_IRQ_MASK) != 0)) {
		/* Mask AXI completion interrupt - will be unmasked in Deferred service handler */
		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_COMP_IRQ_MASK);
		irr &= ~SSI_COMP_IRQ_MASK;
		complete_request(drvdata);
	}
#ifdef CONFIG_CRYPTO_FIPS
	/* TEE FIPS interrupt */
	if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
		/* Mask interrupt - will be unmasked in Deferred service handler */
		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_GPR0_IRQ_MASK);
		irr &= ~SSI_GPR0_IRQ_MASK;
		fips_handler(drvdata);
	}
#endif
	/* AXI error interrupt */
	if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK) != 0)) {
		u32 axi_err;

		/* Read the AXI error ID */
		axi_err = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
		SSI_LOG_DEBUG("AXI completion error: axim_mon_err=0x%08X\n", axi_err);

		irr &= ~SSI_AXI_ERR_IRQ_MASK;
	}

	/* Any bits still set were not recognized above; log and move on */
	if (unlikely(irr != 0)) {
		SSI_LOG_DEBUG("IRR includes unknown cause bits (0x%08X)\n", irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}
171 
/*
 * init_cc_regs() - program the CC interrupt and AXI cache registers.
 * @drvdata:  driver context (provides the mapped register base)
 * @is_probe: true on first-time probe; enables informational cache-param logs
 *
 * Sequence: unmask AXI interrupt sources, clear any pending host interrupts,
 * unmask the causes this driver services, optionally set an IRQ coalescing
 * delay, then write the AXI cache parameters (coherent vs. non-coherent).
 * Always returns 0. Also called on power-management resume (is_probe=false).
 */
int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
{
	unsigned int val, cache_params;
	void __iomem *cc_base = drvdata->cc_base;

	/* Unmask all AXI interrupt sources AXI_CFG1 register */
	val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG));
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
	SSI_LOG_DEBUG("AXIM_CFG=0x%08X\n", CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG)));

	/* Clear all pending interrupts */
	val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
	SSI_LOG_DEBUG("IRR=0x%08X\n", val);
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), val);

	/* Unmask relevant interrupt cause */
	val = (~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK | SSI_GPR0_IRQ_MASK));
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);

#ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
#ifdef DX_IRQ_DELAY
	/* Set CC IRQ delay */
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL),
			      DX_IRQ_DELAY);
#endif
	/* Log the effective IRQ delay when one is configured */
	if (CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)) > 0) {
		SSI_LOG_DEBUG("irq_delay=%d CC cycles\n",
			      CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)));
	}
#endif

	/* DMA coherency discovered from DT at probe time (of_dma_is_coherent) */
	cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);

	val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));

	if (is_probe)
		SSI_LOG_INFO("Cache params previous: 0x%08X\n", val);

	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS),
			      cache_params);
	/* Read back so the log shows what the HW actually latched */
	val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));

	if (is_probe)
		SSI_LOG_INFO("Cache params current: 0x%08X (expect: 0x%08X)\n",
			     val, cache_params);

	return 0;
}
220 
init_cc_resources(struct platform_device * plat_dev)221 static int init_cc_resources(struct platform_device *plat_dev)
222 {
223 	struct resource *req_mem_cc_regs = NULL;
224 	void __iomem *cc_base = NULL;
225 	bool irq_registered = false;
226 	struct ssi_drvdata *new_drvdata = kzalloc(sizeof(*new_drvdata),
227 						  GFP_KERNEL);
228 	struct device *dev = &plat_dev->dev;
229 	struct device_node *np = dev->of_node;
230 	u32 signature_val;
231 	int rc = 0;
232 
233 	if (unlikely(!new_drvdata)) {
234 		SSI_LOG_ERR("Failed to allocate drvdata");
235 		rc = -ENOMEM;
236 		goto init_cc_res_err;
237 	}
238 
239 	new_drvdata->clk = of_clk_get(np, 0);
240 	new_drvdata->coherent = of_dma_is_coherent(np);
241 
242 	/*Initialize inflight counter used in dx_ablkcipher_secure_complete used for count of BYSPASS blocks operations*/
243 	new_drvdata->inflight_counter = 0;
244 
245 	dev_set_drvdata(&plat_dev->dev, new_drvdata);
246 	/* Get device resources */
247 	/* First CC registers space */
248 	new_drvdata->res_mem = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
249 	if (unlikely(!new_drvdata->res_mem)) {
250 		SSI_LOG_ERR("Failed getting IO memory resource\n");
251 		rc = -ENODEV;
252 		goto init_cc_res_err;
253 	}
254 	SSI_LOG_DEBUG("Got MEM resource (%s): start=%pad end=%pad\n",
255 		      new_drvdata->res_mem->name,
256 		      new_drvdata->res_mem->start,
257 		      new_drvdata->res_mem->end);
258 	/* Map registers space */
259 	req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs");
260 	if (unlikely(!req_mem_cc_regs)) {
261 		SSI_LOG_ERR("Couldn't allocate registers memory region at "
262 			     "0x%08X\n", (unsigned int)new_drvdata->res_mem->start);
263 		rc = -EBUSY;
264 		goto init_cc_res_err;
265 	}
266 	cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem));
267 	if (unlikely(!cc_base)) {
268 		SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n",
269 			    (unsigned int)new_drvdata->res_mem->start,
270 			    (unsigned int)resource_size(new_drvdata->res_mem));
271 		rc = -ENOMEM;
272 		goto init_cc_res_err;
273 	}
274 	SSI_LOG_DEBUG("CC registers mapped from %pa to 0x%p\n", &new_drvdata->res_mem->start, cc_base);
275 	new_drvdata->cc_base = cc_base;
276 
277 	/* Then IRQ */
278 	new_drvdata->res_irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
279 	if (unlikely(!new_drvdata->res_irq)) {
280 		SSI_LOG_ERR("Failed getting IRQ resource\n");
281 		rc = -ENODEV;
282 		goto init_cc_res_err;
283 	}
284 	rc = request_irq(new_drvdata->res_irq->start, cc_isr,
285 			 IRQF_SHARED, "arm_cc7x", new_drvdata);
286 	if (unlikely(rc != 0)) {
287 		SSI_LOG_ERR("Could not register to interrupt %llu\n",
288 			    (unsigned long long)new_drvdata->res_irq->start);
289 		goto init_cc_res_err;
290 	}
291 	init_completion(&new_drvdata->icache_setup_completion);
292 
293 	irq_registered = true;
294 	SSI_LOG_DEBUG("Registered to IRQ (%s) %llu\n",
295 		      new_drvdata->res_irq->name,
296 		      (unsigned long long)new_drvdata->res_irq->start);
297 
298 	new_drvdata->plat_dev = plat_dev;
299 
300 	rc = cc_clk_on(new_drvdata);
301 	if (rc)
302 		goto init_cc_res_err;
303 
304 	if (!new_drvdata->plat_dev->dev.dma_mask)
305 		new_drvdata->plat_dev->dev.dma_mask = &new_drvdata->plat_dev->dev.coherent_dma_mask;
306 
307 	if (!new_drvdata->plat_dev->dev.coherent_dma_mask)
308 		new_drvdata->plat_dev->dev.coherent_dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
309 
310 	/* Verify correct mapping */
311 	signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
312 	if (signature_val != DX_DEV_SIGNATURE) {
313 		SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
314 			    signature_val, (u32)DX_DEV_SIGNATURE);
315 		rc = -EINVAL;
316 		goto init_cc_res_err;
317 	}
318 	SSI_LOG_DEBUG("CC SIGNATURE=0x%08X\n", signature_val);
319 
320 	/* Display HW versions */
321 	SSI_LOG(KERN_INFO, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n", SSI_DEV_NAME_STR,
322 		CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_VERSION)), DRV_MODULE_VERSION);
323 
324 	rc = init_cc_regs(new_drvdata, true);
325 	if (unlikely(rc != 0)) {
326 		SSI_LOG_ERR("init_cc_regs failed\n");
327 		goto init_cc_res_err;
328 	}
329 
330 #ifdef ENABLE_CC_SYSFS
331 	rc = ssi_sysfs_init(&plat_dev->dev.kobj, new_drvdata);
332 	if (unlikely(rc != 0)) {
333 		SSI_LOG_ERR("init_stat_db failed\n");
334 		goto init_cc_res_err;
335 	}
336 #endif
337 
338 	rc = ssi_sram_mgr_init(new_drvdata);
339 	if (unlikely(rc != 0)) {
340 		SSI_LOG_ERR("ssi_sram_mgr_init failed\n");
341 		goto init_cc_res_err;
342 	}
343 
344 	new_drvdata->mlli_sram_addr =
345 		ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
346 	if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
347 		SSI_LOG_ERR("Failed to alloc MLLI Sram buffer\n");
348 		rc = -ENOMEM;
349 		goto init_cc_res_err;
350 	}
351 
352 	rc = request_mgr_init(new_drvdata);
353 	if (unlikely(rc != 0)) {
354 		SSI_LOG_ERR("request_mgr_init failed\n");
355 		goto init_cc_res_err;
356 	}
357 
358 	rc = ssi_buffer_mgr_init(new_drvdata);
359 	if (unlikely(rc != 0)) {
360 		SSI_LOG_ERR("buffer_mgr_init failed\n");
361 		goto init_cc_res_err;
362 	}
363 
364 	rc = ssi_power_mgr_init(new_drvdata);
365 	if (unlikely(rc != 0)) {
366 		SSI_LOG_ERR("ssi_power_mgr_init failed\n");
367 		goto init_cc_res_err;
368 	}
369 
370 	rc = ssi_fips_init(new_drvdata);
371 	if (unlikely(rc != 0)) {
372 		SSI_LOG_ERR("SSI_FIPS_INIT failed 0x%x\n", rc);
373 		goto init_cc_res_err;
374 	}
375 
376 	rc = ssi_ivgen_init(new_drvdata);
377 	if (unlikely(rc != 0)) {
378 		SSI_LOG_ERR("ssi_ivgen_init failed\n");
379 		goto init_cc_res_err;
380 	}
381 
382 	/* Allocate crypto algs */
383 	rc = ssi_ablkcipher_alloc(new_drvdata);
384 	if (unlikely(rc != 0)) {
385 		SSI_LOG_ERR("ssi_ablkcipher_alloc failed\n");
386 		goto init_cc_res_err;
387 	}
388 
389 	/* hash must be allocated before aead since hash exports APIs */
390 	rc = ssi_hash_alloc(new_drvdata);
391 	if (unlikely(rc != 0)) {
392 		SSI_LOG_ERR("ssi_hash_alloc failed\n");
393 		goto init_cc_res_err;
394 	}
395 
396 	rc = ssi_aead_alloc(new_drvdata);
397 	if (unlikely(rc != 0)) {
398 		SSI_LOG_ERR("ssi_aead_alloc failed\n");
399 		goto init_cc_res_err;
400 	}
401 
402 	/* If we got here and FIPS mode is enabled
403 	 * it means all FIPS test passed, so let TEE
404 	 * know we're good.
405 	 */
406 	cc_set_ree_fips_status(new_drvdata, true);
407 
408 	return 0;
409 
410 init_cc_res_err:
411 	SSI_LOG_ERR("Freeing CC HW resources!\n");
412 
413 	if (new_drvdata) {
414 		ssi_aead_free(new_drvdata);
415 		ssi_hash_free(new_drvdata);
416 		ssi_ablkcipher_free(new_drvdata);
417 		ssi_ivgen_fini(new_drvdata);
418 		ssi_power_mgr_fini(new_drvdata);
419 		ssi_buffer_mgr_fini(new_drvdata);
420 		request_mgr_fini(new_drvdata);
421 		ssi_sram_mgr_fini(new_drvdata);
422 		ssi_fips_fini(new_drvdata);
423 #ifdef ENABLE_CC_SYSFS
424 		ssi_sysfs_fini();
425 #endif
426 
427 		if (req_mem_cc_regs) {
428 			if (irq_registered) {
429 				free_irq(new_drvdata->res_irq->start, new_drvdata);
430 				new_drvdata->res_irq = NULL;
431 				iounmap(cc_base);
432 				new_drvdata->cc_base = NULL;
433 			}
434 			release_mem_region(new_drvdata->res_mem->start,
435 					   resource_size(new_drvdata->res_mem));
436 			new_drvdata->res_mem = NULL;
437 		}
438 		kfree(new_drvdata);
439 		dev_set_drvdata(&plat_dev->dev, NULL);
440 	}
441 
442 	return rc;
443 }
444 
fini_cc_regs(struct ssi_drvdata * drvdata)445 void fini_cc_regs(struct ssi_drvdata *drvdata)
446 {
447 	/* Mask all interrupts */
448 	WRITE_REGISTER(drvdata->cc_base +
449 		       CC_REG_OFFSET(HOST_RGF, HOST_IMR), 0xFFFFFFFF);
450 }
451 
/*
 * cleanup_cc_resources() - release everything acquired by init_cc_resources().
 * @plat_dev: the platform device being removed
 *
 * Teardown mirrors the init order in reverse: crypto algs first, then the
 * sub-modules, then mask interrupts, gate the clock, free the IRQ, and
 * finally unmap/release the register space and free the context.
 */
static void cleanup_cc_resources(struct platform_device *plat_dev)
{
	struct ssi_drvdata *drvdata =
		(struct ssi_drvdata *)dev_get_drvdata(&plat_dev->dev);

	ssi_aead_free(drvdata);
	ssi_hash_free(drvdata);
	ssi_ablkcipher_free(drvdata);
	ssi_ivgen_fini(drvdata);
	ssi_power_mgr_fini(drvdata);
	ssi_buffer_mgr_fini(drvdata);
	request_mgr_fini(drvdata);
	ssi_sram_mgr_fini(drvdata);
	ssi_fips_fini(drvdata);
#ifdef ENABLE_CC_SYSFS
	ssi_sysfs_fini();
#endif

	/* Mask interrupts while the mapping is still live, then gate clock */
	fini_cc_regs(drvdata);
	cc_clk_off(drvdata);
	free_irq(drvdata->res_irq->start, drvdata);
	drvdata->res_irq = NULL;

	if (drvdata->cc_base) {
		iounmap(drvdata->cc_base);
		release_mem_region(drvdata->res_mem->start,
				   resource_size(drvdata->res_mem));
		drvdata->cc_base = NULL;
		drvdata->res_mem = NULL;
	}

	kfree(drvdata);
	dev_set_drvdata(&plat_dev->dev, NULL);
}
486 
cc_clk_on(struct ssi_drvdata * drvdata)487 int cc_clk_on(struct ssi_drvdata *drvdata)
488 {
489 	struct clk *clk = drvdata->clk;
490 	int rc;
491 
492 	if (IS_ERR(clk))
493 		/* Not all devices have a clock associated with CCREE  */
494 		return 0;
495 
496 	rc = clk_prepare_enable(clk);
497 	if (rc)
498 		return rc;
499 
500 	return 0;
501 }
502 
cc_clk_off(struct ssi_drvdata * drvdata)503 void cc_clk_off(struct ssi_drvdata *drvdata)
504 {
505 	struct clk *clk = drvdata->clk;
506 
507 	if (IS_ERR(clk))
508 		/* Not all devices have a clock associated with CCREE */
509 		return;
510 
511 	clk_disable_unprepare(clk);
512 }
513 
cc7x_probe(struct platform_device * plat_dev)514 static int cc7x_probe(struct platform_device *plat_dev)
515 {
516 	int rc;
517 #if defined(CONFIG_ARM) && defined(CC_DEBUG)
518 	u32 ctr, cacheline_size;
519 
520 	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
521 	cacheline_size =  4 << ((ctr >> 16) & 0xf);
522 	SSI_LOG_DEBUG("CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
523 		      cacheline_size, L1_CACHE_BYTES);
524 
525 	asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
526 	SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n",
527 		      (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF,
528 		      (ctr >> 20) & 0xF, ctr & 0xF);
529 #endif
530 
531 	/* Map registers space */
532 	rc = init_cc_resources(plat_dev);
533 	if (rc != 0)
534 		return rc;
535 
536 	SSI_LOG(KERN_INFO, "ARM cc7x_ree device initialized\n");
537 
538 	return 0;
539 }
540 
cc7x_remove(struct platform_device * plat_dev)541 static int cc7x_remove(struct platform_device *plat_dev)
542 {
543 	SSI_LOG_DEBUG("Releasing cc7x resources...\n");
544 
545 	cleanup_cc_resources(plat_dev);
546 
547 	SSI_LOG(KERN_INFO, "ARM cc7x_ree device terminated\n");
548 
549 	return 0;
550 }
551 
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
/* Runtime-PM callbacks, implemented by the ssi_pm sub-module */
static const struct dev_pm_ops arm_cc7x_driver_pm = {
	SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
};
#endif

/* Select the pm_ops table only when a PM flavor is configured */
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
#define	DX_DRIVER_RUNTIME_PM	(&arm_cc7x_driver_pm)
#else
#define	DX_DRIVER_RUNTIME_PM	NULL
#endif

#ifdef CONFIG_OF
/* Device-tree match table: binds to CryptoCell 712 REE nodes */
static const struct of_device_id arm_cc7x_dev_of_match[] = {
	{.compatible = "arm,cryptocell-712-ree"},
	{}
};
MODULE_DEVICE_TABLE(of, arm_cc7x_dev_of_match);
#endif

/* Platform driver glue: probe/remove plus optional OF matching and PM ops */
static struct platform_driver cc7x_driver = {
	.driver = {
		   .name = "cc7xree",
#ifdef CONFIG_OF
		   .of_match_table = arm_cc7x_dev_of_match,
#endif
		   .pm = DX_DRIVER_RUNTIME_PM,
	},
	.probe = cc7x_probe,
	.remove = cc7x_remove,
};
module_platform_driver(cc7x_driver);

/* Module description */
MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");
590