// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_admin.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_ras.h>
#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
#include "adf_420xx_hw_data.h"
#include "icp_qat_hw.h"

#define ADF_AE_GROUP_0		GENMASK(3, 0)
#define ADF_AE_GROUP_1		GENMASK(7, 4)
#define ADF_AE_GROUP_2		GENMASK(11, 8)
#define ADF_AE_GROUP_3		GENMASK(15, 12)
#define ADF_AE_GROUP_4		BIT(16)

#define ENA_THD_MASK_ASYM	GENMASK(1, 0)
#define ENA_THD_MASK_SYM	GENMASK(3, 0)
#define ENA_THD_MASK_DC		GENMASK(1, 0)

static const char * const adf_420xx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] =  ADF_420XX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] =  ADF_420XX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] =  ADF_420XX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_420XX_ADMIN_OBJ,
};

static const struct adf_fw_config adf_fw_cy_config[] = {
	{ADF_AE_GROUP_3, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_sym_config[] = {
	{ADF_AE_GROUP_3, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_asym_config[] = {
	{ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_asym_dc_config[] = {
	{ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_sym_dc_config[] = {
	{ADF_AE_GROUP_2, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_dcc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

static struct adf_hw_device_class adf_420xx_class = {
	.name = ADF_420XX_DEVICE_NAME,
	.type = DEV_420XX,
	.instances = 0,
};

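/*
 * Derive the enabled acceleration engine (AE) mask by inverting the ME
 * disable fuses, limited to the engines present on 420xx devices.
 */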
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
	u32 me_disable = self->fuses;

	return ~me_disable & ADF_420XX_ACCELENGINES_MASK;
}

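/* Number of firmware objects to load for the currently enabled service */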
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_CY:
	case SVC_CY2:
		return ARRAY_SIZE(adf_fw_cy_config);
	case SVC_DC:
		return ARRAY_SIZE(adf_fw_dc_config);
	case SVC_DCC:
		return ARRAY_SIZE(adf_fw_dcc_config);
	case SVC_SYM:
		return ARRAY_SIZE(adf_fw_sym_config);
	case SVC_ASYM:
		return ARRAY_SIZE(adf_fw_asym_config);
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
		return ARRAY_SIZE(adf_fw_asym_dc_config);
	case SVC_SYM_DC:
	case SVC_DC_SYM:
		return ARRAY_SIZE(adf_fw_sym_dc_config);
	default:
		return 0;
	}
}

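/*
 * Return the firmware-to-AE mapping table that matches the enabled service,
 * or NULL if the service configuration is not recognized.
 */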
static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
{
	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_CY:
	case SVC_CY2:
		return adf_fw_cy_config;
	case SVC_DC:
		return adf_fw_dc_config;
	case SVC_DCC:
		return adf_fw_dcc_config;
	case SVC_SYM:
		return adf_fw_sym_config;
	case SVC_ASYM:
		return adf_fw_asym_config;
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
		return adf_fw_asym_dc_config;
	case SVC_SYM_DC:
	case SVC_DC_SYM:
		return adf_fw_sym_dc_config;
	default:
		return NULL;
	}
}

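/*
 * Restrict the fuse-derived AE mask to the admin AE plus the engines
 * referenced by the selected firmware configuration.
 */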
static void update_ae_mask(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	const struct adf_fw_config *fw_config;
	u32 config_ae_mask = 0;
	u32 ae_mask, num_objs;
	int i;

	ae_mask = get_ae_mask(hw_data);

	/* Modify the AE mask based on the firmware configuration loaded */
	fw_config = get_fw_config(accel_dev);
	num_objs = uof_get_num_objs(accel_dev);

	config_ae_mask |= ADF_420XX_ADMIN_AE_MASK;
	for (i = 0; i < num_objs; i++)
		config_ae_mask |= fw_config[i].ae_mask;

	hw_data->ae_mask = ae_mask & config_ae_mask;
}

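/*
 * Build the capability mask for the enabled service, removing any capability
 * whose backing slice is fused off in FUSECTL1.
 */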
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
	u32 capabilities_sym, capabilities_asym, capabilities_dc;
	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 capabilities_dcc;
	u32 fusectl1;

	/* As a side effect, update ae_mask based on configuration */
	update_ae_mask(accel_dev);

	/* Read accelerator capabilities mask */
	pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1);

	capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_CIPHER |
			  ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
			  ICP_ACCEL_CAPABILITIES_SHA3 |
			  ICP_ACCEL_CAPABILITIES_SHA3_EXT |
			  ICP_ACCEL_CAPABILITIES_HKDF |
			  ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
			  ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
			  ICP_ACCEL_CAPABILITIES_SM3 |
			  ICP_ACCEL_CAPABILITIES_SM4 |
			  ICP_ACCEL_CAPABILITIES_AES_V2 |
			  ICP_ACCEL_CAPABILITIES_ZUC |
			  ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
			  ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;

	/* A set bit in fusectl1 means the feature is OFF in this SKU */
	if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE)
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;

	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_SM2 |
			  ICP_ACCEL_CAPABILITIES_ECEDMONT;

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) {
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}

	capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) {
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
	}

	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_CY:
	case SVC_CY2:
		return capabilities_sym | capabilities_asym;
	case SVC_DC:
		return capabilities_dc;
	case SVC_DCC:
		/*
		 * Sym capabilities are available for chaining operations,
		 * but sym crypto instances cannot be supported
		 */
		capabilities_dcc = capabilities_dc | capabilities_sym;
		capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		return capabilities_dcc;
	case SVC_SYM:
		return capabilities_sym;
	case SVC_ASYM:
		return capabilities_asym;
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
		return capabilities_asym | capabilities_dc;
	case SVC_SYM_DC:
	case SVC_DC_SYM:
		return capabilities_sym | capabilities_dc;
	default:
		return 0;
	}
}

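/* Generate and return the thread to arbiter mapping for this device */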
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
	if (adf_gen4_init_thd2arb_map(accel_dev))
		dev_warn(&GET_DEV(accel_dev),
			 "Failed to generate thread to arbiter mapping");

	return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}

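/*
 * Populate the rate limiting parameters: token bucket offsets shared with
 * other GEN4 devices plus the 420xx specific scaling factors and per-service
 * maximum throughput values.
 */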
static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
{
	rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
	rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
	rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET;
	rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET;
	rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET;

	rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV;
	rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL;
	rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION;
	rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
	rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
	rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
	rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC;
	rl_data->scale_ref = ADF_420XX_RL_SLICE_REF;
}

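/*
 * Map an AE group to its ring pair group. Group 2 maps to RP_GROUP_0 only in
 * the crypto (cy) firmware configuration.
 */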
static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
{
	switch (ae_mask) {
	case ADF_AE_GROUP_0:
		return RP_GROUP_0;
	case ADF_AE_GROUP_1:
	case ADF_AE_GROUP_3:
		return RP_GROUP_1;
	case ADF_AE_GROUP_2:
		if (get_fw_config(accel_dev) == adf_fw_cy_config)
			return RP_GROUP_0;
		else
			return RP_GROUP_1;
	default:
		dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized");
		return -EINVAL;
	}
}

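/*
 * Return the enabled thread mask for the firmware object at @obj_num, or
 * ADF_GEN4_ENA_THD_MASK_ERROR if the object is unknown or out of range.
 */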
static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	if (obj_num >= uof_get_num_objs(accel_dev))
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	switch (fw_config[obj_num].obj) {
	case ADF_FW_ASYM_OBJ:
		return ENA_THD_MASK_ASYM;
	case ADF_FW_SYM_OBJ:
		return ENA_THD_MASK_SYM;
	case ADF_FW_DC_OBJ:
		return ENA_THD_MASK_DC;
	default:
		return ADF_GEN4_ENA_THD_MASK_ERROR;
	}
}

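/*
 * Translate the firmware object configured at @obj_num into its image name.
 * Returns NULL if there is no valid configuration or the id is out of range.
 */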
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
				const char * const fw_objs[], int num_objs)
{
	const struct adf_fw_config *fw_config;
	int id;

	fw_config = get_fw_config(accel_dev);
	if (fw_config)
		id = fw_config[obj_num].obj;
	else
		id = -EINVAL;

	if (id < 0 || id >= num_objs)
		return NULL;

	return fw_objs[id];
}

static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	int num_fw_objs = ARRAY_SIZE(adf_420xx_fw_objs);

	return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs);
}

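/* Return the firmware object type at @obj_num, or -EINVAL if unavailable */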
static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	if (obj_num >= uof_get_num_objs(accel_dev))
		return -EINVAL;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return -EINVAL;

	return fw_config[obj_num].obj;
}

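/* AE mask for the firmware object at @obj_num, or 0 if no configuration */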
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return 0;

	return fw_config[obj_num].ae_mask;
}

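/* Set the 420xx specific error and parity mask values */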
static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
{
	dev_err_mask->cppagentcmdpar_mask = ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK;
	dev_err_mask->parerr_ath_cph_mask = ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK;
	dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK;
	dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK;
	dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK;
	dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK;
	dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK;
}

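/*
 * Populate the hw_data ops and constants for 420xx devices, reusing the
 * shared GEN4 helpers wherever the behaviour is common to the generation.
 */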
void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
	hw_data->dev_class = &adf_420xx_class;
	hw_data->instance_id = adf_420xx_class.instances++;
	hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS;
	hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
	hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_420XX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_gen4_enable_error_correction;
	hw_data->get_accel_mask = adf_gen4_get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = adf_gen4_get_num_accels;
	hw_data->get_num_aes = adf_gen4_get_num_aes;
	hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id;
	hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id;
	hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id;
	hw_data->get_arb_info = adf_gen4_get_arb_info;
	hw_data->get_admin_info = adf_gen4_get_admin_info;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_sku = adf_gen4_get_sku;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_gen4_enable_ints;
	hw_data->init_device = adf_gen4_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK;
	hw_data->num_rps = ADF_GEN4_MAX_RPS;
	hw_data->fw_name = ADF_420XX_FW;
	hw_data->fw_mmp_name = ADF_420XX_MMP;
	hw_data->uof_get_name = uof_get_name_420xx;
	hw_data->uof_get_num_objs = uof_get_num_objs;
	hw_data->uof_get_obj_type = uof_get_obj_type;
	hw_data->uof_get_ae_mask = uof_get_ae_mask;
	hw_data->get_rp_group = get_rp_group;
	hw_data->get_ena_thd_mask = get_ena_thd_mask;
	hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
	hw_data->enable_pm = adf_gen4_enable_pm;
	hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
	hw_data->dev_config = adf_gen4_dev_config;
	hw_data->start_timer = adf_gen4_timer_start;
	hw_data->stop_timer = adf_gen4_timer_stop;
	hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
	hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
	hw_data->clock_frequency = ADF_420XX_AE_FREQ;

	adf_gen4_set_err_mask(&hw_data->dev_err_mask);
	adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	adf_gen4_init_dc_ops(&hw_data->dc_ops);
	adf_gen4_init_ras_ops(&hw_data->ras_ops);
	adf_gen4_init_tl_data(&hw_data->tl_data);
	adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
	adf_init_rl_data(&hw_data->rl_data);
}

void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}
494