// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
#include "adf_c62x_hw_data.h"

/* Worker thread to service arbiter mappings based on dev SKUs */
static const u32 thrd_to_arb_map_8_me_sku[] = {
	0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
	0x11222AAA, 0x12222AAA, 0x11222AAA, 0, 0
};

static const u32 thrd_to_arb_map_10_me_sku[] = {
	0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
	0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};

static struct adf_hw_device_class c62x_class = {
	.name = ADF_C62X_DEVICE_NAME,
	.type = DEV_C62X,
	.instances = 0
};

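/* Derive the accelerator mask: accelerators not disabled by fuses or soft straps */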
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
	u32 straps = self->straps;
	u32 fuses = self->fuses;
	u32 accel;

	accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET;
	accel &= ADF_C62X_ACCELERATORS_MASK;

	return accel;
}

static u32 get_ae_mask(struct adf_hw_device_data *self)
{
	u32 straps = self->straps;
	u32 fuses = self->fuses;
	unsigned long disabled;
	u32 ae_disable;
	int accel;

	/* If an accel is disabled, then disable the corresponding two AEs */
	disabled = ~get_accel_mask(self) & ADF_C62X_ACCELERATORS_MASK;
	ae_disable = BIT(1) | BIT(0);
	for_each_set_bit(accel, &disabled, ADF_C62X_MAX_ACCELERATORS)
		straps |= ae_disable << (accel << 1);

	return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
}

static u32 get_num_accels(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->accel_mask)
		return 0;

	for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) {
		if (self->accel_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

static u32 get_num_aes(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->ae_mask)
		return 0;

	for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) {
		if (self->ae_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_C62X_PMISC_BAR;
}

static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_C62X_ETR_BAR;
}

static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_C62X_SRAM_BAR;
}

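/* The device SKU is derived from the number of enabled accel engines */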
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	int aes = get_num_aes(self);

	if (aes == 8)
		return DEV_SKU_2;
	else if (aes == 10)
		return DEV_SKU_4;

	return DEV_SKU_UNKNOWN;
}

static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
				    u32 const **arb_map_config)
{
	switch (accel_dev->accel_pci_dev.sku) {
	case DEV_SKU_2:
		*arb_map_config = thrd_to_arb_map_8_me_sku;
		break;
	case DEV_SKU_4:
		*arb_map_config = thrd_to_arb_map_10_me_sku;
		break;
	default:
		dev_err(&GET_DEV(accel_dev),
			"The configuration doesn't match any SKU");
		*arb_map_config = NULL;
	}
}

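/* Per-VF register offsets used for PF/VF messaging and VF interrupt masking */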
static u32 get_pf2vf_offset(u32 i)
{
	return ADF_C62X_PF2VF_OFFSET(i);
}

static u32 get_vintmsk_offset(u32 i)
{
	return ADF_C62X_VINTMSK_OFFSET(i);
}

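/* Enable error detection and correction in the accel engines and shared memory */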
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR];
	unsigned long accel_mask = hw_device->accel_mask;
	unsigned long ae_mask = hw_device->ae_mask;
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
		val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i));
		val |= ADF_C62X_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i));
		val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for_each_set_bit(i, &accel_mask, ADF_C62X_MAX_ACCELERATORS) {
		val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i));
		val |= ADF_C62X_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i));
		val |= ADF_C62X_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val);
	}
}

static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr;

	/* Enable bundle and misc interrupts */
	ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET,
		   ADF_C62X_SMIA0_MASK);
	ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET,
		   ADF_C62X_SMIA1_MASK);
}

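/* No device-specific setup is needed to enable VF2PF comms on this device */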
static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	return 0;
}

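/* Populate the c62x-specific constants and callbacks consumed by the common ADF code */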
void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &c62x_class;
	hw_data->instance_id = c62x_class.instances++;
	hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
	hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_pf2vf_offset = get_pf2vf_offset;
	hw_data->get_vintmsk_offset = get_vintmsk_offset;
	hw_data->get_sku = get_sku;
	hw_data->fw_name = ADF_C62X_FW;
	hw_data->fw_mmp_name = ADF_C62X_MMP;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
	hw_data->reset_device = adf_reset_flr;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}

void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}