// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
#include <adf_gen2_hw_data.h>
#include "adf_c3xxx_hw_data.h"
#include "icp_qat_hw.h"

/* Worker thread to service arbiter mappings */
static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
	0x12222AAA, 0x11222AAA, 0x12222AAA,
	0x11222AAA, 0x12222AAA, 0x11222AAA
};

static struct adf_hw_device_class c3xxx_class = {
	.name = ADF_C3XXX_DEVICE_NAME,
	.type = DEV_C3XXX,
	.instances = 0
};

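/*
 * The accelerator mask is derived from the fuse and soft strap registers:
 * a bit set in either register marks the corresponding unit as disabled,
 * hence the inversion below.
 */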
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
	u32 straps = self->straps;
	u32 fuses = self->fuses;
	u32 accel;

	accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET;
	accel &= ADF_C3XXX_ACCELERATORS_MASK;

	return accel;
}

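/*
 * Each accelerator feeds a pair of accel engines, so AEs 2n and 2n+1 are
 * masked out whenever accelerator n is fused or strapped off.
 */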
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
	u32 straps = self->straps;
	u32 fuses = self->fuses;
	unsigned long disabled;
	u32 ae_disable;
	int accel;

	/* If an accel is disabled, then disable the corresponding two AEs */
	disabled = ~get_accel_mask(self) & ADF_C3XXX_ACCELERATORS_MASK;
	ae_disable = BIT(1) | BIT(0);
	for_each_set_bit(accel, &disabled, ADF_C3XXX_MAX_ACCELERATORS)
		straps |= ae_disable << (accel << 1);

	return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
}

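/* Population counts of the masks computed above */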
static u32 get_num_accels(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->accel_mask)
		return 0;

	for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) {
		if (self->accel_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

static u32 get_num_aes(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->ae_mask)
		return 0;

	for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) {
		if (self->ae_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

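/* Fixed BAR ids reported back to the device-independent ADF core */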
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_C3XXX_PMISC_BAR;
}

static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_C3XXX_ETR_BAR;
}

static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return 0;
}

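/*
 * Map the active engine count to a SKU; a fully enabled c3xxx exposes six
 * accel engines, and any other count is reported as unknown.
 */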
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	int aes = get_num_aes(self);

	if (aes == 6)
		return DEV_SKU_4;

	return DEV_SKU_UNKNOWN;
}

static const u32 *adf_get_arbiter_mapping(void)
{
	return thrd_to_arb_map;
}

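/* Per-VF offset of the PF2VF message register in the PMISC BAR */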
static u32 get_pf2vf_offset(u32 i)
{
	return ADF_C3XXX_PF2VF_OFFSET(i);
}

static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR];
	unsigned long accel_mask = hw_device->accel_mask;
	unsigned long ae_mask = hw_device->ae_mask;
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
		val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i));
		val |= ADF_C3XXX_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i));
		val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for_each_set_bit(i, &accel_mask, ADF_C3XXX_MAX_ACCELERATORS) {
		val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i));
		val |= ADF_C3XXX_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i));
		val |= ADF_C3XXX_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val);
	}
}

static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr;

	/* Enable bundle and misc interrupts */
	ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET,
		   ADF_C3XXX_SMIA0_MASK);
	ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET,
		   ADF_C3XXX_SMIA1_MASK);
}

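/*
 * No extra hardware programming is needed to enable PF/VF messaging here;
 * only the lock serializing VF2PF interrupt servicing is initialized.
 */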
static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
	spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);

	return 0;
}

static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
	adf_gen2_cfg_iov_thds(accel_dev, enable,
			      ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS,
			      ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS);
}

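/*
 * Fill in the hw_device_data vtable with the c3xxx-specific callbacks above
 * plus the helpers shared by all gen2 QAT devices.
 */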
void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &c3xxx_class;
	hw_data->instance_id = c3xxx_class.instances++;
	hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
	hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_accel_cap = adf_gen2_get_accel_cap;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_admin_info = adf_gen2_get_admin_info;
	hw_data->get_arb_info = adf_gen2_get_arb_info;
	hw_data->get_sku = get_sku;
	hw_data->fw_name = ADF_C3XXX_FW;
	hw_data->fw_mmp_name = ADF_C3XXX_MMP;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->configure_iov_threads = configure_iov_threads;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->reset_device = adf_reset_flr;
	hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
	hw_data->get_pf2vf_offset = get_pf2vf_offset;
	hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;

	adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}

void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}