// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include "adf_dh895xcc_hw_data.h"

/* Worker thread to service arbiter mappings based on dev SKUs */
static const u32 thrd_to_arb_map_sku4[] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};

static const u32 thrd_to_arb_map_sku6[] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
};

static struct adf_hw_device_class dh895xcc_class = {
	.name = ADF_DH895XCC_DEVICE_NAME,
	.type = DEV_DH895XCC,
	.instances = 0
};

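/* A set fuse bit marks a disabled unit, so invert before masking */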
static u32 get_accel_mask(u32 fuse)
{
	return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
			  ADF_DH895XCC_ACCELERATORS_MASK;
}

static u32 get_ae_mask(u32 fuse)
{
	return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK;
}

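/* Count the accelerators and accel engines left enabled in the fuse-derived masks */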
static u32 get_num_accels(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->accel_mask)
		return 0;

	for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
		if (self->accel_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

static u32 get_num_aes(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->ae_mask)
		return 0;

	for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
		if (self->ae_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

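/* Fixed BAR assignments for this device */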
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_PMISC_BAR;
}

static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_ETR_BAR;
}

static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_SRAM_BAR;
}

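/* The device SKU is encoded in the fuse control register */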
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
	    >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;

	switch (sku) {
	case ADF_DH895XCC_FUSECTL_SKU_1:
		return DEV_SKU_1;
	case ADF_DH895XCC_FUSECTL_SKU_2:
		return DEV_SKU_2;
	case ADF_DH895XCC_FUSECTL_SKU_3:
		return DEV_SKU_3;
	case ADF_DH895XCC_FUSECTL_SKU_4:
		return DEV_SKU_4;
	default:
		return DEV_SKU_UNKNOWN;
	}
	return DEV_SKU_UNKNOWN;
}

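/* Pick the thread-to-arbiter mapping table that matches the detected SKU */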
static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
				    u32 const **arb_map_config)
{
	switch (accel_dev->accel_pci_dev.sku) {
	case DEV_SKU_1:
		*arb_map_config = thrd_to_arb_map_sku4;
		break;

	case DEV_SKU_2:
	case DEV_SKU_4:
		*arb_map_config = thrd_to_arb_map_sku6;
		break;
	default:
		dev_err(&GET_DEV(accel_dev),
			"The configuration doesn't match any SKU");
		*arb_map_config = NULL;
	}
}

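/* Per-VF CSR offsets used for PF/VF messaging and VF interrupt masking */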
static u32 get_pf2vf_offset(u32 i)
{
	return ADF_DH895XCC_PF2VF_OFFSET(i);
}

static u32 get_vintmsk_offset(u32 i)
{
	return ADF_DH895XCC_VINTMSK_OFFSET(i);
}

static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
	}
}

static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;

	/* Enable bundle and misc interrupts */
	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
		   accel_dev->pf.vf_info ? 0 :
			GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
		   ADF_DH895XCC_SMIA1_MASK);
}

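/* Enabling VF to PF comms needs no device-specific work here; just report success */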
static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	return 0;
}

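/* Populate the DH895xCC-specific callbacks and device parameters */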
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &dh895xcc_class;
	hw_data->instance_id = dh895xcc_class.instances++;
	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_pf2vf_offset = get_pf2vf_offset;
	hw_data->get_vintmsk_offset = get_vintmsk_offset;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_sku = get_sku;
	hw_data->fw_name = ADF_DH895XCC_FW;
	hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
	hw_data->reset_device = adf_reset_sbr;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}

void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}