/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_driver.h
 * ARM CryptoCell Linux Crypto Driver
 */

#ifndef __CC_DRIVER_H__
#define __CC_DRIVER_H__

#ifdef COMP_IN_WQ
#include <linux/workqueue.h>
#else
#include <linux/interrupt.h>
#endif
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/version.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include "cc_host_regs.h"
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"

extern bool cc_dump_desc;
extern bool cc_dump_bytes;

#define DRV_MODULE_VERSION "5.0"

enum cc_hw_rev {
	CC_HW_REV_630 = 630,
	CC_HW_REV_710 = 710,
	CC_HW_REV_712 = 712,
	CC_HW_REV_713 = 713
};

enum cc_std_body {
	CC_STD_NIST = 0x1,
	CC_STD_OSCCA = 0x2,
	CC_STD_ALL = 0x3
};

#define CC_COHERENT_CACHE_PARAMS 0xEEE

#define CC_PINS_FULL	0x0
#define CC_PINS_SLIM	0x9F

/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48

#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))

#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)

#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)

#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)

#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)

#define AXIM_MON_COMP_VALUE CC_GENMASK(CC_AXIM_MON_COMP_VALUE)

#define CC_CPP_AES_ABORT_MASK ( \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))

#define CC_CPP_SM4_ABORT_MASK ( \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))

/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
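/* e.g. CC_REG(HOST_IRR) expands to CC_HOST_IRR_REG_OFFSET */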

/* TEE FIPS status interrupt */
#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)

#define CC_CRA_PRIO 400

#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */

#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080

/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
#define AXI_ID 0
/* AXI_ID is not actually the AXI ID of the transaction but the value of
 * the AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
 */

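/**
 * struct cc_cpp_req - CPP operation request information
 * @is_cpp:	true if this request is a CPP operation
 * @alg:	the CPP algorithm in use (AES or SM4)
 * @slot:	key slot number used by the operation
 */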
struct cc_cpp_req {
	bool is_cpp;
	enum cc_cpp_alg alg;
	u8 slot;
};

#define CC_MAX_IVGEN_DMA_ADDRESSES	3
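
/**
 * struct cc_crypto_req - driver-internal request context
 * @user_cb:	completion callback invoked when the request finishes
 * @user_arg:	opaque pointer passed back to @user_cb
 * @seq_compl:	request completion
 * @cpp:	CPP operation information for this request
 */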
struct cc_crypto_req {
	void (*user_cb)(struct device *dev, void *req, int err);
	void *user_arg;
	struct completion seq_compl; /* request completion */
	struct cc_cpp_req cpp;
};

/**
 * struct cc_drvdata - driver private data context
 * @cc_base:	virt address of the CC registers
 * @irq:	bitmap indicating source of last interrupt
 * @hw_queue_avail: wait for HW queue availability
 * @plat_dev:	platform device this driver instance is bound to
 * @mlli_sram_addr: SRAM address used for the MLLI tables
 * @mlli_buffs_pool: DMA pool for MLLI buffers
 * @alg_list:	list of algorithms registered by the driver
 * @hash_handle: hash methods context
 * @aead_handle: AEAD methods context
 * @request_mgr_handle: request manager context
 * @fips_handle: FIPS methods context
 * @sram_free_offset: offset to non-allocated area in SRAM
 * @dir:	debugfs directory entry
 * @clk:	device clock
 * @coherent:	true if the device is cache coherent
 * @hw_rev_name: HW revision name
 * @hw_rev:	HW revision
 * @axim_mon_offset: offset of the AXIM monitor completion register
 * @sig_offset:	offset of the HW signature register
 * @ver_offset:	offset of the HW version register
 * @std_bodies:	bitmask of supported standard bodies (enum cc_std_body)
 * @sec_disabled: true if security is disabled on this instance
 * @comp_mask:	interrupt mask of the enabled completion interrupts
 */
struct cc_drvdata {
	void __iomem *cc_base;
	int irq;
	struct completion hw_queue_avail; /* wait for HW queue availability */
	struct platform_device *plat_dev;
	u32 mlli_sram_addr;
	struct dma_pool *mlli_buffs_pool;
	struct list_head alg_list;
	void *hash_handle;
	void *aead_handle;
	void *request_mgr_handle;
	void *fips_handle;
	u32 sram_free_offset; /* offset to non-allocated area in SRAM */
	struct dentry *dir; /* for debugfs */
	struct clk *clk;
	bool coherent;
	char *hw_rev_name;
	enum cc_hw_rev hw_rev;
	u32 axim_mon_offset;
	u32 sig_offset;
	u32 ver_offset;
	int std_bodies;
	bool sec_disabled;
	u32 comp_mask;
};

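/**
 * struct cc_crypto_alg - driver representation of a registered algorithm
 * @entry:	entry in the driver's list of registered algorithms
 * @cipher_mode: cipher mode (e.g. ECB, CBC, XTS)
 * @flow_mode:	flow mode; currently refers to the cipher mode only
 * @auth_mode:	authentication mode, for AEAD algorithms
 * @drvdata:	driver private data context
 * @skcipher_alg: skcipher algorithm descriptor
 * @aead_alg:	AEAD algorithm descriptor
 */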
struct cc_crypto_alg {
	struct list_head entry;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	struct cc_drvdata *drvdata;
	struct skcipher_alg skcipher_alg;
	struct aead_alg aead_alg;
};

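/**
 * struct cc_alg_template - template describing an algorithm to register
 * @name:	algorithm name
 * @driver_name: driver-internal algorithm name
 * @blocksize:	algorithm block size
 * @template_u:	skcipher or AEAD algorithm template
 * @cipher_mode: cipher mode
 * @flow_mode:	flow mode; currently refers to the cipher mode only
 * @auth_mode:	authentication mode
 * @min_hw_rev:	minimum HW revision that supports the algorithm
 * @std_body:	standard body the algorithm is defined by
 * @sec_func:	true if the algorithm depends on the security features
 * @data_unit:	data unit size, for modes that use one (e.g. XTS)
 * @drvdata:	driver private data context
 */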
struct cc_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	union {
		struct skcipher_alg skcipher;
		struct aead_alg aead;
	} template_u;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	u32 min_hw_rev;
	enum cc_std_body std_body;
	bool sec_func;
	unsigned int data_unit;
	struct cc_drvdata *drvdata;
};

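/**
 * struct async_gen_req_ctx - common request context for async operations
 * @iv_dma_addr:	DMA address of the IV buffer
 * @iv:		virtual address of the IV buffer
 * @op_type:	encrypt or decrypt
 */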
struct async_gen_req_ctx {
	dma_addr_t iv_dma_addr;
	u8 *iv;
	enum drv_crypto_direction op_type;
};

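/* Retrieve the struct device backing this driver instance */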
static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
{
	return &drvdata->plat_dev->dev;
}

void __dump_byte_array(const char *name, const u8 *buf, size_t len);
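/* Dump a byte array to the log, but only if the cc_dump_bytes flag is set */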
static inline void dump_byte_array(const char *name, const u8 *the_array,
				   size_t size)
{
	if (cc_dump_bytes)
		__dump_byte_array(name, the_array, size);
}

bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);

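/* Write a 32-bit value to a CC register at offset @reg from cc_base */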
static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
	iowrite32(val, (drvdata->cc_base + reg));
}

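/* Read a 32-bit value from a CC register at offset @reg from cc_base */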
static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
{
	return ioread32(drvdata->cc_base + reg);
}

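/*
 * Select GFP flags for allocations done on behalf of a crypto request:
 * requests that may sleep can use GFP_KERNEL, all others need GFP_ATOMIC.
 */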
static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
{
	return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		GFP_KERNEL : GFP_ATOMIC;
}

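/*
 * Mark a descriptor as the last one in the HW queue sequence. The queue
 * last indication bit is only supported by HW revisions 712 and up.
 */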
static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
				      struct cc_hw_desc *pdesc)
{
	if (drvdata->hw_rev >= CC_HW_REV_712)
		set_queue_last_ind_bit(pdesc);
}

#endif /*__CC_DRIVER_H__*/