1 /******************************************************************************
2  * This software may be used and distributed according to the terms of
3  * the GNU General Public License (GPL), incorporated herein by reference.
4  * Drivers based on or derived from this code fall under the GPL and must
5  * retain the authorship, copyright and license notice.  This file is not
6  * a complete program and may only be used when the entire operating
7  * system is licensed under the GPL.
8  * See the file COPYING in this distribution for more information.
9  *
10  * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11  *                 Virtualized Server Adapter.
12  * Copyright(c) 2002-2010 Exar Corp.
13  ******************************************************************************/
14 #include <linux/etherdevice.h>
15 #include <linux/io-64-nonatomic-lo-hi.h>
16 #include <linux/prefetch.h>
17 
18 #include "vxge-traffic.h"
19 #include "vxge-config.h"
20 #include "vxge-main.h"
21 
22 /*
23  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
24  * @vp: Virtual Path handle.
25  *
26  * Enable vpath interrupts. The function is to be executed last in the
27  * vpath initialization sequence.
28  *
29  * See also: vxge_hw_vpath_intr_disable()
30  */
31 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
32 {
33 	struct __vxge_hw_virtualpath *vpath;
34 	struct vxge_hw_vpath_reg __iomem *vp_reg;
35 	enum vxge_hw_status status = VXGE_HW_OK;
36 	if (vp == NULL) {
37 		status = VXGE_HW_ERR_INVALID_HANDLE;
38 		goto exit;
39 	}
40 
41 	vpath = vp->vpath;
42 
43 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
44 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
45 		goto exit;
46 	}
47 
48 	vp_reg = vpath->vp_reg;
49 
50 	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
51 
52 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
53 			&vp_reg->general_errors_reg);
54 
55 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
56 			&vp_reg->pci_config_errors_reg);
57 
58 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
59 			&vp_reg->mrpcim_to_vpath_alarm_reg);
60 
61 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
62 			&vp_reg->srpcim_to_vpath_alarm_reg);
63 
64 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
65 			&vp_reg->vpath_ppif_int_status);
66 
67 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
68 			&vp_reg->srpcim_msg_to_vpath_reg);
69 
70 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
71 			&vp_reg->vpath_pcipif_int_status);
72 
73 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
74 			&vp_reg->prc_alarm_reg);
75 
76 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
77 			&vp_reg->wrdma_alarm_status);
78 
79 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
80 			&vp_reg->asic_ntwk_vp_err_reg);
81 
82 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
83 			&vp_reg->xgmac_vp_int_status);
84 
85 	readq(&vp_reg->vpath_general_int_status);
86 
87 	/* Mask unwanted interrupts */
88 
89 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
90 			&vp_reg->vpath_pcipif_int_mask);
91 
92 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
93 			&vp_reg->srpcim_msg_to_vpath_mask);
94 
95 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
96 			&vp_reg->srpcim_to_vpath_alarm_mask);
97 
98 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
99 			&vp_reg->mrpcim_to_vpath_alarm_mask);
100 
101 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
102 			&vp_reg->pci_config_errors_mask);
103 
104 	/* Unmask the individual interrupts */
105 
106 	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
107 		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
108 		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
109 		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
110 		&vp_reg->general_errors_mask);
111 
112 	__vxge_hw_pio_mem_write32_upper(
113 		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
114 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
115 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
116 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
117 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
118 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
119 		&vp_reg->kdfcctl_errors_mask);
120 
121 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
122 
123 	__vxge_hw_pio_mem_write32_upper(
124 		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
125 		&vp_reg->prc_alarm_mask);
126 
127 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
128 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
129 
130 	if (vpath->hldev->first_vp_id != vpath->vp_id)
131 		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
132 			&vp_reg->asic_ntwk_vp_err_mask);
133 	else
134 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
135 		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
136 		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
137 		&vp_reg->asic_ntwk_vp_err_mask);
138 
139 	__vxge_hw_pio_mem_write32_upper(0,
140 		&vp_reg->vpath_general_int_mask);
141 exit:
142 	return status;
143 
144 }
145 
146 /*
147  * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
148  * @vp: Virtual Path handle.
149  *
150  * Disable vpath interrupts. The function is to be executed as the first
151  * step of the vpath teardown sequence.
152  *
153  * See also: vxge_hw_vpath_intr_enable()
154  */
155 enum vxge_hw_status vxge_hw_vpath_intr_disable(
156 			struct __vxge_hw_vpath_handle *vp)
157 {
158 	struct __vxge_hw_virtualpath *vpath;
159 	enum vxge_hw_status status = VXGE_HW_OK;
160 	struct vxge_hw_vpath_reg __iomem *vp_reg;
161 	if (vp == NULL) {
162 		status = VXGE_HW_ERR_INVALID_HANDLE;
163 		goto exit;
164 	}
165 
166 	vpath = vp->vpath;
167 
168 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
169 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
170 		goto exit;
171 	}
172 	vp_reg = vpath->vp_reg;
173 
174 	__vxge_hw_pio_mem_write32_upper(
175 		(u32)VXGE_HW_INTR_MASK_ALL,
176 		&vp_reg->vpath_general_int_mask);
177 
178 	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
179 
180 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
181 			&vp_reg->general_errors_mask);
182 
183 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
184 			&vp_reg->pci_config_errors_mask);
185 
186 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
187 			&vp_reg->mrpcim_to_vpath_alarm_mask);
188 
189 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
190 			&vp_reg->srpcim_to_vpath_alarm_mask);
191 
192 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
193 			&vp_reg->vpath_ppif_int_mask);
194 
195 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
196 			&vp_reg->srpcim_msg_to_vpath_mask);
197 
198 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
199 			&vp_reg->vpath_pcipif_int_mask);
200 
201 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
202 			&vp_reg->wrdma_alarm_mask);
203 
204 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
205 			&vp_reg->prc_alarm_mask);
206 
207 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
208 			&vp_reg->xgmac_vp_int_mask);
209 
210 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
211 			&vp_reg->asic_ntwk_vp_err_mask);
212 
213 exit:
214 	return status;
215 }
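/*
 * Editor's note: a minimal usage sketch for the two helpers above (not part
 * of the driver). It assumes a hypothetical per-vpath open/close path that
 * already holds a valid struct __vxge_hw_vpath_handle *vp.
 *
 *	// end of vpath bring-up: unmask vpath interrupts last
 *	if (vxge_hw_vpath_intr_enable(vp) != VXGE_HW_OK)
 *		goto err_close_vpath;
 *
 *	// start of vpath tear-down: mask vpath interrupts first
 *	vxge_hw_vpath_intr_disable(vp);
 */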
216 
217 void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
218 {
219 	struct vxge_hw_vpath_reg __iomem *vp_reg;
220 	struct vxge_hw_vp_config *config;
221 	u64 val64;
222 
223 	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
224 		return;
225 
226 	vp_reg = fifo->vp_reg;
227 	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
228 
229 	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
230 		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
231 		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
232 		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
233 		fifo->tim_tti_cfg1_saved = val64;
234 		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
235 	}
236 }
237 
238 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
239 {
240 	u64 val64 = ring->tim_rti_cfg1_saved;
241 
242 	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
243 	ring->tim_rti_cfg1_saved = val64;
244 	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
245 }
246 
247 void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
248 {
249 	u64 val64 = fifo->tim_tti_cfg3_saved;
250 	u64 timer = (fifo->rtimer * 1000) / 272;
251 
252 	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
253 	if (timer)
254 		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
255 			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
256 
257 	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
258 	/* tti_cfg3_saved is not updated again because it is
259 	 * initialized at one place only - init time.
260 	 */
261 }
262 
263 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
264 {
265 	u64 val64 = ring->tim_rti_cfg3_saved;
266 	u64 timer = (ring->rtimer * 1000) / 272;
267 
268 	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
269 	if (timer)
270 		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
271 			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
272 
273 	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
274 	/* rti_cfg3_saved is not updated again because it is
275 	 * initialized at one place only - init time.
276 	 */
277 }
278 
279 /**
280  * vxge_hw_channel_msix_mask - Mask MSIX Vector.
281  * @channel: Channel for rx or tx handle
282  * @msix_id:  MSIX ID
283  *
284  * The function masks the msix interrupt for the given msix_id
285  *
286  * Returns: void
287  */
288 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
289 {
290 
291 	__vxge_hw_pio_mem_write32_upper(
292 		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
293 		&channel->common_reg->set_msix_mask_vect[msix_id%4]);
294 }
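/*
 * Editor's note: a short worked example of the msix_id decomposition used
 * above (editor's illustration). For msix_id = 5 the register index is
 * msix_id % 4 = 1 and the bit written is vxge_mBIT(msix_id >> 2) =
 * vxge_mBIT(1), i.e. vector 5 is masked through bit 1 of
 * set_msix_mask_vect[1]. The unmask and clear helpers below apply the same
 * decomposition to their own registers.
 */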
295 
296 /**
297  * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
298  * @channel: Channel for rx or tx handle
299  * @msix_id:  MSIX ID
300  *
301  * The function unmasks the msix interrupt for the given msix_id
302  *
303  * Returns: void
304  */
305 void
306 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
307 {
308 
309 	__vxge_hw_pio_mem_write32_upper(
310 		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
311 		&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
312 }
313 
314 /**
315  * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
316  * @channel: Channel for rx or tx handle
317  * @msix_id:  MSIX ID
318  *
319  * The function clears (re-arms) the msix interrupt for the given msix_id
320  * when the device is configured in MSIX one-shot mode.
321  *
322  * Returns: void
323  */
324 void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
325 {
326 	__vxge_hw_pio_mem_write32_upper(
327 		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
328 		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
329 }
330 
331 /**
332  * vxge_hw_device_set_intr_type - Updates the configuration
333  *		with new interrupt type.
334  * @hldev: HW device handle.
335  * @intr_mode: New interrupt type
336  */
337 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
338 {
339 
340 	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
341 	   (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
342 	   (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
343 	   (intr_mode != VXGE_HW_INTR_MODE_DEF))
344 		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
345 
346 	hldev->config.intr_mode = intr_mode;
347 	return intr_mode;
348 }
349 
350 /**
351  * vxge_hw_device_intr_enable - Enable interrupts.
352  * @hldev: HW device handle.
353  *
354  * Enable Titan interrupts. The function is to be executed last in the
355  * Titan initialization sequence.
356  *
357  * See also: vxge_hw_device_intr_disable()
358  */
359 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
360 {
361 	u32 i;
362 	u64 val64;
363 	u32 val32;
364 
365 	vxge_hw_device_mask_all(hldev);
366 
367 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
368 
369 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
370 			continue;
371 
372 		vxge_hw_vpath_intr_enable(
373 			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
374 	}
375 
376 	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
377 		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
378 			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
379 
380 		if (val64 != 0) {
381 			writeq(val64, &hldev->common_reg->tim_int_status0);
382 
383 			writeq(~val64, &hldev->common_reg->tim_int_mask0);
384 		}
385 
386 		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
387 			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
388 
389 		if (val32 != 0) {
390 			__vxge_hw_pio_mem_write32_upper(val32,
391 					&hldev->common_reg->tim_int_status1);
392 
393 			__vxge_hw_pio_mem_write32_upper(~val32,
394 					&hldev->common_reg->tim_int_mask1);
395 		}
396 	}
397 
398 	val64 = readq(&hldev->common_reg->titan_general_int_status);
399 
400 	vxge_hw_device_unmask_all(hldev);
401 }
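/*
 * Editor's note: a minimal device-level bring-up sketch (not part of the
 * driver), assuming hldev has already been initialized and the caller picks
 * INTA vs MSI-X before requesting its IRQ vectors.
 *
 *	vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_MSIX);
 *	// ... open vpaths, request IRQ / MSI-X vectors ...
 *	vxge_hw_device_intr_enable(hldev);	// last step of init
 *
 *	// on shutdown, mirror it:
 *	vxge_hw_device_intr_disable(hldev);
 */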
402 
403 /**
404  * vxge_hw_device_intr_disable - Disable Titan interrupts.
405  * @hldev: HW device handle.
406  *
407  * Disable Titan interrupts.
408  *
409  * See also: vxge_hw_device_intr_enable()
410  */
411 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
412 {
413 	u32 i;
414 
415 	vxge_hw_device_mask_all(hldev);
416 
417 	/* mask all the tim interrupts */
418 	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
419 	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
420 		&hldev->common_reg->tim_int_mask1);
421 
422 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
423 
424 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
425 			continue;
426 
427 		vxge_hw_vpath_intr_disable(
428 			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
429 	}
430 }
431 
432 /**
433  * vxge_hw_device_mask_all - Mask all device interrupts.
434  * @hldev: HW device handle.
435  *
436  * Mask	all device interrupts.
437  *
438  * See also: vxge_hw_device_unmask_all()
439  */
440 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
441 {
442 	u64 val64;
443 
444 	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
445 		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
446 
447 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
448 				&hldev->common_reg->titan_mask_all_int);
449 }
450 
451 /**
452  * vxge_hw_device_unmask_all - Unmask all device interrupts.
453  * @hldev: HW device handle.
454  *
455  * Unmask all device interrupts.
456  *
457  * See also: vxge_hw_device_mask_all()
458  */
459 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
460 {
461 	u64 val64 = 0;
462 
463 	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
464 		val64 =  VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
465 
466 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
467 			&hldev->common_reg->titan_mask_all_int);
468 }
469 
470 /**
471  * vxge_hw_device_flush_io - Flush io writes.
472  * @hldev: HW device handle.
473  *
474  * The function	performs a read operation to flush io writes.
475  *
476  * Returns: void
477  */
478 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
479 {
480 	readl(&hldev->common_reg->titan_general_int_status);
481 }
482 
483 /**
484  * __vxge_hw_device_handle_error - Handle error
485  * @hldev: HW device
486  * @vp_id: Vpath Id
487  * @type: Error type. Please see enum vxge_hw_event{}
488  *
489  * Handle error.
490  */
491 static enum vxge_hw_status
492 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
493 			      enum vxge_hw_event type)
494 {
495 	switch (type) {
496 	case VXGE_HW_EVENT_UNKNOWN:
497 		break;
498 	case VXGE_HW_EVENT_RESET_START:
499 	case VXGE_HW_EVENT_RESET_COMPLETE:
500 	case VXGE_HW_EVENT_LINK_DOWN:
501 	case VXGE_HW_EVENT_LINK_UP:
502 		goto out;
503 	case VXGE_HW_EVENT_ALARM_CLEARED:
504 		goto out;
505 	case VXGE_HW_EVENT_ECCERR:
506 	case VXGE_HW_EVENT_MRPCIM_ECCERR:
507 		goto out;
508 	case VXGE_HW_EVENT_FIFO_ERR:
509 	case VXGE_HW_EVENT_VPATH_ERR:
510 	case VXGE_HW_EVENT_CRITICAL_ERR:
511 	case VXGE_HW_EVENT_SERR:
512 		break;
513 	case VXGE_HW_EVENT_SRPCIM_SERR:
514 	case VXGE_HW_EVENT_MRPCIM_SERR:
515 		goto out;
516 	case VXGE_HW_EVENT_SLOT_FREEZE:
517 		break;
518 	default:
519 		vxge_assert(0);
520 		goto out;
521 	}
522 
523 	/* notify driver */
524 	if (hldev->uld_callbacks->crit_err)
525 		hldev->uld_callbacks->crit_err(hldev,
526 			type, vp_id);
527 out:
528 
529 	return VXGE_HW_OK;
530 }
531 
532 /*
533  * __vxge_hw_device_handle_link_down_ind
534  * @hldev: HW device handle.
535  *
536  * Link down indication handler. The function is invoked by HW when
537  * Titan indicates that the link is down.
538  */
539 static enum vxge_hw_status
540 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
541 {
542 	/*
543 	 * If the previous link state is already down, return.
544 	 */
545 	if (hldev->link_state == VXGE_HW_LINK_DOWN)
546 		goto exit;
547 
548 	hldev->link_state = VXGE_HW_LINK_DOWN;
549 
550 	/* notify driver */
551 	if (hldev->uld_callbacks->link_down)
552 		hldev->uld_callbacks->link_down(hldev);
553 exit:
554 	return VXGE_HW_OK;
555 }
556 
557 /*
558  * __vxge_hw_device_handle_link_up_ind
559  * @hldev: HW device handle.
560  *
561  * Link up indication handler. The function is invoked by HW when
562  * Titan indicates that the link is up for a programmable amount of time.
563  */
564 static enum vxge_hw_status
565 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
566 {
567 	/*
568 	 * If the previous link state is already up, return.
569 	 */
570 	if (hldev->link_state == VXGE_HW_LINK_UP)
571 		goto exit;
572 
573 	hldev->link_state = VXGE_HW_LINK_UP;
574 
575 	/* notify driver */
576 	if (hldev->uld_callbacks->link_up)
577 		hldev->uld_callbacks->link_up(hldev);
578 exit:
579 	return VXGE_HW_OK;
580 }
581 
582 /*
583  * __vxge_hw_vpath_alarm_process - Process Alarms.
584  * @vpath: Virtual Path.
585  * @skip_alarms: Do not clear the alarms
586  *
587  * Process vpath alarms.
588  *
589  */
590 static enum vxge_hw_status
591 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
592 			      u32 skip_alarms)
593 {
594 	u64 val64;
595 	u64 alarm_status;
596 	u64 pic_status;
597 	struct __vxge_hw_device *hldev = NULL;
598 	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
599 	u64 mask64;
600 	struct vxge_hw_vpath_stats_sw_info *sw_stats;
601 	struct vxge_hw_vpath_reg __iomem *vp_reg;
602 
603 	if (vpath == NULL) {
604 		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
605 			alarm_event);
606 		goto out2;
607 	}
608 
609 	hldev = vpath->hldev;
610 	vp_reg = vpath->vp_reg;
611 	alarm_status = readq(&vp_reg->vpath_general_int_status);
612 
613 	if (alarm_status == VXGE_HW_ALL_FOXES) {
614 		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
615 			alarm_event);
616 		goto out;
617 	}
618 
619 	sw_stats = vpath->sw_stats;
620 
621 	if (alarm_status & ~(
622 		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
623 		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
624 		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
625 		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
626 		sw_stats->error_stats.unknown_alarms++;
627 
628 		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
629 			alarm_event);
630 		goto out;
631 	}
632 
633 	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
634 
635 		val64 = readq(&vp_reg->xgmac_vp_int_status);
636 
637 		if (val64 &
638 		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
639 
640 			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
641 
642 			if (((val64 &
643 			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
644 			     (!(val64 &
645 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
646 			    ((val64 &
647 			     VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
648 			     (!(val64 &
649 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
650 				     ))) {
651 				sw_stats->error_stats.network_sustained_fault++;
652 
653 				writeq(
654 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
655 					&vp_reg->asic_ntwk_vp_err_mask);
656 
657 				__vxge_hw_device_handle_link_down_ind(hldev);
658 				alarm_event = VXGE_HW_SET_LEVEL(
659 					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
660 			}
661 
662 			if (((val64 &
663 			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
664 			     (!(val64 &
665 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
666 			    ((val64 &
667 			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
668 			     (!(val64 &
669 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
670 				     ))) {
671 
672 				sw_stats->error_stats.network_sustained_ok++;
673 
674 				writeq(
675 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
676 					&vp_reg->asic_ntwk_vp_err_mask);
677 
678 				__vxge_hw_device_handle_link_up_ind(hldev);
679 				alarm_event = VXGE_HW_SET_LEVEL(
680 					VXGE_HW_EVENT_LINK_UP, alarm_event);
681 			}
682 
683 			writeq(VXGE_HW_INTR_MASK_ALL,
684 				&vp_reg->asic_ntwk_vp_err_reg);
685 
686 			alarm_event = VXGE_HW_SET_LEVEL(
687 				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
688 
689 			if (skip_alarms)
690 				return VXGE_HW_OK;
691 		}
692 	}
693 
694 	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
695 
696 		pic_status = readq(&vp_reg->vpath_ppif_int_status);
697 
698 		if (pic_status &
699 		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
700 
701 			val64 = readq(&vp_reg->general_errors_reg);
702 			mask64 = readq(&vp_reg->general_errors_mask);
703 
704 			if ((val64 &
705 				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
706 				~mask64) {
707 				sw_stats->error_stats.ini_serr_det++;
708 
709 				alarm_event = VXGE_HW_SET_LEVEL(
710 					VXGE_HW_EVENT_SERR, alarm_event);
711 			}
712 
713 			if ((val64 &
714 			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
715 				~mask64) {
716 				sw_stats->error_stats.dblgen_fifo0_overflow++;
717 
718 				alarm_event = VXGE_HW_SET_LEVEL(
719 					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
720 			}
721 
722 			if ((val64 &
723 			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
724 				~mask64)
725 				sw_stats->error_stats.statsb_pif_chain_error++;
726 
727 			if ((val64 &
728 			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
729 				~mask64)
730 				sw_stats->error_stats.statsb_drop_timeout++;
731 
732 			if ((val64 &
733 				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
734 				~mask64)
735 				sw_stats->error_stats.target_illegal_access++;
736 
737 			if (!skip_alarms) {
738 				writeq(VXGE_HW_INTR_MASK_ALL,
739 					&vp_reg->general_errors_reg);
740 				alarm_event = VXGE_HW_SET_LEVEL(
741 					VXGE_HW_EVENT_ALARM_CLEARED,
742 					alarm_event);
743 			}
744 		}
745 
746 		if (pic_status &
747 		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
748 
749 			val64 = readq(&vp_reg->kdfcctl_errors_reg);
750 			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
751 
752 			if ((val64 &
753 			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
754 				~mask64) {
755 				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
756 
757 				alarm_event = VXGE_HW_SET_LEVEL(
758 					VXGE_HW_EVENT_FIFO_ERR,
759 					alarm_event);
760 			}
761 
762 			if ((val64 &
763 			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
764 				~mask64) {
765 				sw_stats->error_stats.kdfcctl_fifo0_poison++;
766 
767 				alarm_event = VXGE_HW_SET_LEVEL(
768 					VXGE_HW_EVENT_FIFO_ERR,
769 					alarm_event);
770 			}
771 
772 			if ((val64 &
773 			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
774 				~mask64) {
775 				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
776 
777 				alarm_event = VXGE_HW_SET_LEVEL(
778 					VXGE_HW_EVENT_FIFO_ERR,
779 					alarm_event);
780 			}
781 
782 			if (!skip_alarms) {
783 				writeq(VXGE_HW_INTR_MASK_ALL,
784 					&vp_reg->kdfcctl_errors_reg);
785 				alarm_event = VXGE_HW_SET_LEVEL(
786 					VXGE_HW_EVENT_ALARM_CLEARED,
787 					alarm_event);
788 			}
789 		}
790 
791 	}
792 
793 	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
794 
795 		val64 = readq(&vp_reg->wrdma_alarm_status);
796 
797 		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
798 
799 			val64 = readq(&vp_reg->prc_alarm_reg);
800 			mask64 = readq(&vp_reg->prc_alarm_mask);
801 
802 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
803 				~mask64)
804 				sw_stats->error_stats.prc_ring_bumps++;
805 
806 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
807 				~mask64) {
808 				sw_stats->error_stats.prc_rxdcm_sc_err++;
809 
810 				alarm_event = VXGE_HW_SET_LEVEL(
811 					VXGE_HW_EVENT_VPATH_ERR,
812 					alarm_event);
813 			}
814 
815 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
816 				& ~mask64) {
817 				sw_stats->error_stats.prc_rxdcm_sc_abort++;
818 
819 				alarm_event = VXGE_HW_SET_LEVEL(
820 						VXGE_HW_EVENT_VPATH_ERR,
821 						alarm_event);
822 			}
823 
824 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
825 				 & ~mask64) {
826 				sw_stats->error_stats.prc_quanta_size_err++;
827 
828 				alarm_event = VXGE_HW_SET_LEVEL(
829 					VXGE_HW_EVENT_VPATH_ERR,
830 					alarm_event);
831 			}
832 
833 			if (!skip_alarms) {
834 				writeq(VXGE_HW_INTR_MASK_ALL,
835 					&vp_reg->prc_alarm_reg);
836 				alarm_event = VXGE_HW_SET_LEVEL(
837 						VXGE_HW_EVENT_ALARM_CLEARED,
838 						alarm_event);
839 			}
840 		}
841 	}
842 out:
843 	hldev->stats.sw_dev_err_stats.vpath_alarms++;
844 out2:
845 	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
846 		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
847 		return VXGE_HW_OK;
848 
849 	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
850 
851 	if (alarm_event == VXGE_HW_EVENT_SERR)
852 		return VXGE_HW_ERR_CRITICAL;
853 
854 	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
855 		VXGE_HW_ERR_SLOT_FREEZE :
856 		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
857 		VXGE_HW_ERR_VPATH;
858 }
859 
860 /**
861  * vxge_hw_device_begin_irq - Begin IRQ processing.
862  * @hldev: HW device handle.
863  * @skip_alarms: Do not clear the alarms
864  * @reason: "Reason" for the interrupt, the value of Titan's
865  *	general_int_status register.
866  *
867  * The function performs two actions: it first checks whether (for a shared
868  * IRQ) the interrupt was raised by the device, and then it masks the device interrupts.
869  *
870  * Note:
871  * vxge_hw_device_begin_irq() does not flush MMIO writes through the
872  * bridge. Therefore, two back-to-back interrupts are potentially possible.
873  *
874  * Returns: 0, if the interrupt is not "ours" (note that in this case the
875  * device remains enabled).
876  * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
877  * status.
878  */
879 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
880 					     u32 skip_alarms, u64 *reason)
881 {
882 	u32 i;
883 	u64 val64;
884 	u64 adapter_status;
885 	u64 vpath_mask;
886 	enum vxge_hw_status ret = VXGE_HW_OK;
887 
888 	val64 = readq(&hldev->common_reg->titan_general_int_status);
889 
890 	if (unlikely(!val64)) {
891 		/* not Titan interrupt	*/
892 		*reason	= 0;
893 		ret = VXGE_HW_ERR_WRONG_IRQ;
894 		goto exit;
895 	}
896 
897 	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
898 
899 		adapter_status = readq(&hldev->common_reg->adapter_status);
900 
901 		if (adapter_status == VXGE_HW_ALL_FOXES) {
902 
903 			__vxge_hw_device_handle_error(hldev,
904 				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
905 			*reason	= 0;
906 			ret = VXGE_HW_ERR_SLOT_FREEZE;
907 			goto exit;
908 		}
909 	}
910 
911 	hldev->stats.sw_dev_info_stats.total_intr_cnt++;
912 
913 	*reason	= val64;
914 
915 	vpath_mask = hldev->vpaths_deployed >>
916 				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
917 
918 	if (val64 &
919 	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
920 		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
921 
922 		return VXGE_HW_OK;
923 	}
924 
925 	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
926 
927 	if (unlikely(val64 &
928 			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
929 
930 		enum vxge_hw_status error_level = VXGE_HW_OK;
931 
932 		hldev->stats.sw_dev_err_stats.vpath_alarms++;
933 
934 		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
935 
936 			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
937 				continue;
938 
939 			ret = __vxge_hw_vpath_alarm_process(
940 				&hldev->virtual_paths[i], skip_alarms);
941 
942 			error_level = VXGE_HW_SET_LEVEL(ret, error_level);
943 
944 			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
945 				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
946 				break;
947 		}
948 
949 		ret = error_level;
950 	}
951 exit:
952 	return ret;
953 }
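/*
 * Editor's note: a minimal INTA ISR sketch built on the helper above (not the
 * driver's actual ISR). Names such as "my_isr" and the masking/NAPI handling
 * are hypothetical; only the vxge_hw_* calls come from this file.
 *
 *	static irqreturn_t my_isr(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		u64 reason;
 *
 *		if (vxge_hw_device_begin_irq(hldev, 0, &reason) != VXGE_HW_OK)
 *			return IRQ_NONE;	// not ours, or slot-freeze/alarm path
 *
 *		vxge_hw_device_mask_all(hldev);
 *		// ... schedule NAPI / process traffic, then unmask ...
 *		vxge_hw_device_unmask_all(hldev);
 *		return IRQ_HANDLED;
 *	}
 */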
954 
955 /**
956  * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
957  * condition that has caused the Tx and Rx interrupt.
958  * @hldev: HW device.
959  *
960  * Acknowledge (that is, clear) the condition that has caused
961  * the Tx and Rx interrupt.
962  * See also: vxge_hw_device_begin_irq(),
963  * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
964  */
965 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
966 {
967 
968 	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
969 	   (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
970 		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
971 				 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
972 				&hldev->common_reg->tim_int_status0);
973 	}
974 
975 	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
976 	   (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
977 		__vxge_hw_pio_mem_write32_upper(
978 				(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
979 				 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
980 				&hldev->common_reg->tim_int_status1);
981 	}
982 }
983 
984 /*
985  * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
986  * @channel: Channel
987  * @dtrh: Buffer to return the DTR pointer
988  *
989  * Allocates a dtr from the reserve array. If the reserve array is empty,
990  * it swaps the reserve and free arrays.
991  *
992  */
993 static enum vxge_hw_status
994 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
995 {
996 	if (channel->reserve_ptr - channel->reserve_top > 0) {
997 _alloc_after_swap:
998 		*dtrh =	channel->reserve_arr[--channel->reserve_ptr];
999 
1000 		return VXGE_HW_OK;
1001 	}
1002 
1003 	/* switch between empty	and full arrays	*/
1004 
1005 	/* The idea behind such a design is that by keeping the free and reserve
1006 	 * arrays separate we essentially separate the irq and non-irq parts,
1007 	 * i.e. no additional locking is needed when we free a resource. */
1008 
1009 	if (channel->length - channel->free_ptr > 0) {
1010 		swap(channel->reserve_arr, channel->free_arr);
1011 		channel->reserve_ptr = channel->length;
1012 		channel->reserve_top = channel->free_ptr;
1013 		channel->free_ptr = channel->length;
1014 
1015 		channel->stats->reserve_free_swaps_cnt++;
1016 
1017 		goto _alloc_after_swap;
1018 	}
1019 
1020 	channel->stats->full_cnt++;
1021 
1022 	*dtrh =	NULL;
1023 	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1024 }
1025 
1026 /*
1027  * vxge_hw_channel_dtr_post - Post a dtr to the channel
1028  * @channelh: Channel
1029  * @dtrh: DTR pointer
1030  *
1031  * Posts a dtr to work array.
1032  *
1033  */
1034 static void
1035 vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1036 {
1037 	vxge_assert(channel->work_arr[channel->post_index] == NULL);
1038 
1039 	channel->work_arr[channel->post_index++] = dtrh;
1040 
1041 	/* wrap-around */
1042 	if (channel->post_index	== channel->length)
1043 		channel->post_index = 0;
1044 }
1045 
1046 /*
1047  * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1048  * @channel: Channel
1049  * @dtr: Buffer to return the next completed DTR pointer
1050  *
1051  * Returns the next completed dtr without removing it from the work array.
1052  *
1053  */
1054 void
1055 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1056 {
1057 	vxge_assert(channel->compl_index < channel->length);
1058 
1059 	*dtrh =	channel->work_arr[channel->compl_index];
1060 	prefetch(*dtrh);
1061 }
1062 
1063 /*
1064  * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1065  * @channel: Channel handle
1066  *
1067  * Removes the next completed dtr from work array
1068  *
1069  */
1070 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1071 {
1072 	channel->work_arr[channel->compl_index]	= NULL;
1073 
1074 	/* wrap-around */
1075 	if (++channel->compl_index == channel->length)
1076 		channel->compl_index = 0;
1077 
1078 	channel->stats->total_compl_cnt++;
1079 }
1080 
1081 /*
1082  * vxge_hw_channel_dtr_free - Frees a dtr
1083  * @channel: Channel handle
1084  * @dtr:  DTR pointer
1085  *
1086  * Returns the dtr to free array
1087  *
1088  */
1089 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1090 {
1091 	channel->free_arr[--channel->free_ptr] = dtrh;
1092 }
1093 
1094 /*
1095  * vxge_hw_channel_dtr_count
1096  * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1097  *
1098  * Retrieve the number of DTRs available. This function cannot be called
1099  * from the data path; ring_initial_replenish() is the only user.
1100  */
1101 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1102 {
1103 	return (channel->reserve_ptr - channel->reserve_top) +
1104 		(channel->length - channel->free_ptr);
1105 }
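/*
 * Editor's note: a worked example of the count above (editor's illustration).
 * With length = 8, reserve_ptr = 3, reserve_top = 0 and free_ptr = 6, the
 * reserve array still holds 3 - 0 = 3 descriptors and the free array holds
 * 8 - 6 = 2 freed ones, so 5 DTRs in total can still be reserved.
 */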
1106 
1107 /**
1108  * vxge_hw_ring_rxd_reserve	- Reserve ring descriptor.
1109  * @ring: Handle to the ring object used for receive
1110  * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1111  * with a valid handle.
1112  *
1113  * Reserve an Rx descriptor for subsequent filling-in by the driver
1114  * and posting on the corresponding channel (@channelh)
1115  * via vxge_hw_ring_rxd_post().
1116  *
1117  * Returns: VXGE_HW_OK - success.
1118  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1119  *
1120  */
1121 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1122 	void **rxdh)
1123 {
1124 	enum vxge_hw_status status;
1125 	struct __vxge_hw_channel *channel;
1126 
1127 	channel = &ring->channel;
1128 
1129 	status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1130 
1131 	if (status == VXGE_HW_OK) {
1132 		struct vxge_hw_ring_rxd_1 *rxdp =
1133 			(struct vxge_hw_ring_rxd_1 *)*rxdh;
1134 
1135 		rxdp->control_0	= rxdp->control_1 = 0;
1136 	}
1137 
1138 	return status;
1139 }
1140 
1141 /**
1142  * vxge_hw_ring_rxd_free - Free descriptor.
1143  * @ring: Handle to the ring object used for receive
1144  * @rxdh: Descriptor handle.
1145  *
1146  * Free	the reserved descriptor. This operation is "symmetrical" to
1147  * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
1148  * lifecycle.
1149  *
1150  * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
1151  * be:
1152  *
1153  * - reserved (vxge_hw_ring_rxd_reserve);
1154  *
1155  * - posted	(vxge_hw_ring_rxd_post);
1156  *
1157  * - completed (vxge_hw_ring_rxd_next_completed);
1158  *
1159  * - and recycled again	(vxge_hw_ring_rxd_free).
1160  *
1161  * For alternative state transitions and more details please refer to
1162  * the design doc.
1163  *
1164  */
1165 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1166 {
1167 	struct __vxge_hw_channel *channel;
1168 
1169 	channel = &ring->channel;
1170 
1171 	vxge_hw_channel_dtr_free(channel, rxdh);
1172 
1173 }
1174 
1175 /**
1176  * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1177  * @ring: Handle to the ring object used for receive
1178  * @rxdh: Descriptor handle.
1179  *
1180  * This routine prepares a rxd and posts
1181  */
1182 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1183 {
1184 	struct __vxge_hw_channel *channel;
1185 
1186 	channel = &ring->channel;
1187 
1188 	vxge_hw_channel_dtr_post(channel, rxdh);
1189 }
1190 
1191 /**
1192  * vxge_hw_ring_rxd_post_post - Process rxd after post.
1193  * @ring: Handle to the ring object used for receive
1194  * @rxdh: Descriptor handle.
1195  *
1196  * Processes rxd after post
1197  */
1198 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1199 {
1200 	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1201 
1202 	rxdp->control_0	= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1203 
1204 	if (ring->stats->common_stats.usage_cnt > 0)
1205 		ring->stats->common_stats.usage_cnt--;
1206 }
1207 
1208 /**
1209  * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1210  * @ring: Handle to the ring object used for receive
1211  * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1212  *
1213  * Post	descriptor on the ring.
1214  * Prior to posting the	descriptor should be filled in accordance with
1215  * Host/Titan interface specification for a given service (LL, etc.).
1216  *
1217  */
1218 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1219 {
1220 	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1221 	struct __vxge_hw_channel *channel;
1222 
1223 	channel = &ring->channel;
1224 
1225 	wmb();
1226 	rxdp->control_0	= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1227 
1228 	vxge_hw_channel_dtr_post(channel, rxdh);
1229 
1230 	if (ring->stats->common_stats.usage_cnt > 0)
1231 		ring->stats->common_stats.usage_cnt--;
1232 }
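/*
 * Editor's note: a minimal rx-replenish sketch using the ring API above (not
 * part of the driver). vxge_hw_ring_rxd_1b_set() is assumed to be the
 * one-buffer-mode helper from vxge-traffic.h; the DMA mapping of the buffer
 * (dma_addr, buf_size) is assumed to have been done by the caller.
 *
 *	void *rxdh;
 *
 *	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		// attach a receive buffer, then hand the RxD to the adapter
 *		vxge_hw_ring_rxd_1b_set(rxdh, dma_addr, buf_size);
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */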
1233 
1234 /**
1235  * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1236  * @ring: Handle to the ring object used for receive
1237  * @rxdh: Descriptor handle.
1238  *
1239  * Processes rxd after post with memory barrier.
1240  */
1241 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1242 {
1243 	wmb();
1244 	vxge_hw_ring_rxd_post_post(ring, rxdh);
1245 }
1246 
1247 /**
1248  * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1249  * @ring: Handle to the ring object used for receive
1250  * @rxdh: Descriptor handle. Returned by HW.
1251  * @t_code:	Transfer code, as per Titan User Guide,
1252  *	 Receive Descriptor Format. Returned by HW.
1253  *
1254  * Retrieve the	_next_ completed descriptor.
1255  * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
1256  * driver of new completed descriptors. After that
1257  * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
1258  * completions (the very first completion is passed by HW via
1259  * vxge_hw_ring_callback_f).
1260  *
1261  * Implementation-wise, the driver is free to call
1262  * vxge_hw_ring_rxd_next_completed either immediately from inside the
1263  * ring callback, or in a deferred fashion and separate (from HW)
1264  * context.
1265  *
1266  * Non-zero @t_code means failure to fill-in receive buffer(s)
1267  * of the descriptor.
1268  * For instance, parity	error detected during the data transfer.
1269  * In this case	Titan will complete the descriptor and indicate
1270  * for the host	that the received data is not to be used.
1271  * For details please refer to Titan User Guide.
1272  *
1273  * Returns: VXGE_HW_OK - success.
1274  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1275  * are currently available for processing.
1276  *
1277  * See also: vxge_hw_ring_callback_f{},
1278  * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
1279  */
1280 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1281 	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1282 {
1283 	struct __vxge_hw_channel *channel;
1284 	struct vxge_hw_ring_rxd_1 *rxdp;
1285 	enum vxge_hw_status status = VXGE_HW_OK;
1286 	u64 control_0, own;
1287 
1288 	channel = &ring->channel;
1289 
1290 	vxge_hw_channel_dtr_try_complete(channel, rxdh);
1291 
1292 	rxdp = *rxdh;
1293 	if (rxdp == NULL) {
1294 		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1295 		goto exit;
1296 	}
1297 
1298 	control_0 = rxdp->control_0;
1299 	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1300 	*t_code	= (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1301 
1302 	/* check whether it is not the end */
1303 	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1304 
1305 		vxge_assert((rxdp)->host_control !=
1306 				0);
1307 
1308 		++ring->cmpl_cnt;
1309 		vxge_hw_channel_dtr_complete(channel);
1310 
1311 		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1312 
1313 		ring->stats->common_stats.usage_cnt++;
1314 		if (ring->stats->common_stats.usage_max <
1315 				ring->stats->common_stats.usage_cnt)
1316 			ring->stats->common_stats.usage_max =
1317 				ring->stats->common_stats.usage_cnt;
1318 
1319 		status = VXGE_HW_OK;
1320 		goto exit;
1321 	}
1322 
1323 	/* reset it. since we don't want to return
1324 	 * garbage to the driver */
1325 	*rxdh =	NULL;
1326 	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1327 exit:
1328 	return status;
1329 }
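/*
 * Editor's note: a minimal rx-completion loop sketch (not the driver's actual
 * poll routine). The buffer handling in the middle is hypothetical; the
 * vxge_hw_* calls are the ones defined in this file.
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *							VXGE_HW_OK) {
 *		vxge_hw_ring_handle_tcode(ring, rxdh, t_code);
 *		// ... hand good buffers to the stack, recycle bad ones,
 *		// then refill and re-post the RxD:
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */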
1330 
1331 /**
1332  * vxge_hw_ring_handle_tcode - Handle transfer code.
1333  * @ring: Handle to the ring object used for receive
1334  * @rxdh: Descriptor handle.
1335  * @t_code: One of the enumerated (and documented in the Titan user guide)
1336  * "transfer codes".
1337  *
1338  * Handle descriptor's transfer code. The latter comes with each completed
1339  * descriptor.
1340  *
1341  * Returns: one of the enum vxge_hw_status{} enumerated types.
1342  * VXGE_HW_OK			- for success.
1343  * VXGE_HW_ERR_CRITICAL         - when encounters critical error.
1344  */
1345 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1346 	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1347 {
1348 	enum vxge_hw_status status = VXGE_HW_OK;
1349 
1350 	/* If the t_code is not supported and the
1351 	 * t_code is other than 0x5 (unparseable packet,
1352 	 * such as an unknown IPv6 header), drop it.
1353 	 */
1354 
1355 	if (t_code ==  VXGE_HW_RING_T_CODE_OK ||
1356 		t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1357 		status = VXGE_HW_OK;
1358 		goto exit;
1359 	}
1360 
1361 	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1362 		status = VXGE_HW_ERR_INVALID_TCODE;
1363 		goto exit;
1364 	}
1365 
1366 	ring->stats->rxd_t_code_err_cnt[t_code]++;
1367 exit:
1368 	return status;
1369 }
1370 
1371 /**
1372  * __vxge_hw_non_offload_db_post - Post non offload doorbell
1373  *
1374  * @fifo: fifohandle
1375  * @txdl_ptr: The starting location of the TxDL in host memory
1376  * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1377  * @no_snoop: No snoop flags
1378  *
1379  * This function posts a non-offload doorbell to doorbell FIFO
1380  *
1381  */
1382 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1383 	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1384 {
1385 	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1386 		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1387 		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1388 		&fifo->nofl_db->control_0);
1389 
1390 	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1391 }
1392 
1393 /**
1394  * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1395  * the fifo
1396  * @fifoh: Handle to the fifo object used for non offload send
1397  */
1398 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1399 {
1400 	return vxge_hw_channel_dtr_count(&fifoh->channel);
1401 }
1402 
1403 /**
1404  * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1405  * @fifo: Handle to the fifo object used for non offload send
1406  * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1407  *        with a valid handle.
1408  * @txdl_priv: Buffer to return the pointer to per txdl space
1409  *
1410  * Reserve a single TxDL (that is, fifo descriptor)
1411  * for the subsequent filling-in by the driver
1412  * and posting on the corresponding channel (@channelh)
1413  * via vxge_hw_fifo_txdl_post().
1414  *
1415  * Note: it is the responsibility of driver to reserve multiple descriptors
1416  * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1417  * carries up to configured number (fifo.max_frags) of contiguous buffers.
1418  *
1419  * Returns: VXGE_HW_OK - success;
1420  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1421  *
1422  */
1423 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1424 	struct __vxge_hw_fifo *fifo,
1425 	void **txdlh, void **txdl_priv)
1426 {
1427 	struct __vxge_hw_channel *channel;
1428 	enum vxge_hw_status status;
1429 	int i;
1430 
1431 	channel = &fifo->channel;
1432 
1433 	status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1434 
1435 	if (status == VXGE_HW_OK) {
1436 		struct vxge_hw_fifo_txd *txdp =
1437 			(struct vxge_hw_fifo_txd *)*txdlh;
1438 		struct __vxge_hw_fifo_txdl_priv *priv;
1439 
1440 		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1441 
1442 		/* reset the TxDL's private */
1443 		priv->align_dma_offset = 0;
1444 		priv->align_vaddr_start = priv->align_vaddr;
1445 		priv->align_used_frags = 0;
1446 		priv->frags = 0;
1447 		priv->alloc_frags = fifo->config->max_frags;
1448 		priv->next_txdl_priv = NULL;
1449 
1450 		*txdl_priv = (void *)(size_t)txdp->host_control;
1451 
1452 		for (i = 0; i < fifo->config->max_frags; i++) {
1453 			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1454 			txdp->control_0 = txdp->control_1 = 0;
1455 		}
1456 	}
1457 
1458 	return status;
1459 }
1460 
1461 /**
1462  * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1463  * descriptor.
1464  * @fifo: Handle to the fifo object used for non offload send
1465  * @txdlh: Descriptor handle.
1466  * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1467  *            (of buffers).
1468  * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1469  * @size: Size of the data buffer (in bytes).
1470  *
1471  * This API is part of the preparation of the transmit descriptor for posting
1472  * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1473  * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1474  * All three APIs fill in the fields of the fifo descriptor,
1475  * in accordance with the Titan specification.
1476  *
1477  */
1478 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1479 				  void *txdlh, u32 frag_idx,
1480 				  dma_addr_t dma_pointer, u32 size)
1481 {
1482 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1483 	struct vxge_hw_fifo_txd *txdp, *txdp_last;
1484 
1485 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1486 	txdp = (struct vxge_hw_fifo_txd *)txdlh  +  txdl_priv->frags;
1487 
1488 	if (frag_idx != 0)
1489 		txdp->control_0 = txdp->control_1 = 0;
1490 	else {
1491 		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1492 			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1493 		txdp->control_1 |= fifo->interrupt_type;
1494 		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1495 			fifo->tx_intr_num);
1496 		if (txdl_priv->frags) {
1497 			txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +
1498 			(txdl_priv->frags - 1);
1499 			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1500 				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1501 		}
1502 	}
1503 
1504 	vxge_assert(frag_idx < txdl_priv->alloc_frags);
1505 
1506 	txdp->buffer_pointer = (u64)dma_pointer;
1507 	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1508 	fifo->stats->total_buffers++;
1509 	txdl_priv->frags++;
1510 }
1511 
1512 /**
1513  * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1514  * @fifo: Handle to the fifo object used for non offload send
1515  * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1516  *
1517  * Post descriptor on the 'fifo' type channel for transmission.
1518  * Prior to posting the descriptor should be filled in accordance with
1519  * Host/Titan interface specification for a given service (LL, etc.).
1520  *
1521  */
1522 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1523 {
1524 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1525 	struct vxge_hw_fifo_txd *txdp_last;
1526 	struct vxge_hw_fifo_txd *txdp_first;
1527 
1528 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1529 	txdp_first = txdlh;
1530 
1531 	txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +  (txdl_priv->frags - 1);
1532 	txdp_last->control_0 |=
1533 	      VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1534 	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1535 
1536 	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1537 
1538 	__vxge_hw_non_offload_db_post(fifo,
1539 		(u64)txdl_priv->dma_addr,
1540 		txdl_priv->frags - 1,
1541 		fifo->no_snoop_bits);
1542 
1543 	fifo->stats->total_posts++;
1544 	fifo->stats->common_stats.usage_cnt++;
1545 	if (fifo->stats->common_stats.usage_max <
1546 		fifo->stats->common_stats.usage_cnt)
1547 		fifo->stats->common_stats.usage_max =
1548 			fifo->stats->common_stats.usage_cnt;
1549 }
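/*
 * Editor's note: a minimal two-fragment transmit sketch for the fifo API
 * above (not the driver's xmit routine). "hdr_dma"/"frag_dma" and their
 * lengths are hypothetical; the DMA mapping is assumed to have been done
 * already.
 *
 *	void *txdlh, *txdl_priv;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
 *		return NETDEV_TX_BUSY;
 *
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, hdr_dma, hdr_len);
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 1, frag_dma, frag_len);
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);	// hands the TxDL to the adapter
 */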
1550 
1551 /**
1552  * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1553  * @fifo: Handle to the fifo object used for non offload send
1554  * @txdlh: Descriptor handle. Returned by HW.
1555  * @t_code: Transfer code, as per Titan User Guide,
1556  *          Transmit Descriptor Format.
1557  *          Returned by HW.
1558  *
1559  * Retrieve the _next_ completed descriptor.
1560  * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
1561  * driver of new completed descriptors. After that
1562  * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1563  * completions (the very first completion is passed by HW via
1564  * vxge_hw_channel_callback_f).
1565  *
1566  * Implementation-wise, the driver is free to call
1567  * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1568  * channel callback, or in a deferred fashion and separate (from HW)
1569  * context.
1570  *
1571  * Non-zero @t_code means failure to process the descriptor.
1572  * The failure could happen, for instance, when the link is
1573  * down, in which case Titan completes the descriptor because it
1574  * is not able to send the data out.
1575  *
1576  * For details please refer to Titan User Guide.
1577  *
1578  * Returns: VXGE_HW_OK - success.
1579  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1580  * are currently available for processing.
1581  *
1582  */
1583 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1584 	struct __vxge_hw_fifo *fifo, void **txdlh,
1585 	enum vxge_hw_fifo_tcode *t_code)
1586 {
1587 	struct __vxge_hw_channel *channel;
1588 	struct vxge_hw_fifo_txd *txdp;
1589 	enum vxge_hw_status status = VXGE_HW_OK;
1590 
1591 	channel = &fifo->channel;
1592 
1593 	vxge_hw_channel_dtr_try_complete(channel, txdlh);
1594 
1595 	txdp = *txdlh;
1596 	if (txdp == NULL) {
1597 		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1598 		goto exit;
1599 	}
1600 
1601 	/* check whether host owns it */
1602 	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1603 
1604 		vxge_assert(txdp->host_control != 0);
1605 
1606 		vxge_hw_channel_dtr_complete(channel);
1607 
1608 		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1609 
1610 		if (fifo->stats->common_stats.usage_cnt > 0)
1611 			fifo->stats->common_stats.usage_cnt--;
1612 
1613 		status = VXGE_HW_OK;
1614 		goto exit;
1615 	}
1616 
1617 	/* no more completions */
1618 	*txdlh = NULL;
1619 	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1620 exit:
1621 	return status;
1622 }
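/*
 * Editor's note: a minimal tx-completion sketch (not the driver's completion
 * handler). Unmapping and freeing of the skb is assumed to happen where the
 * comment indicates.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *							VXGE_HW_OK) {
 *		vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		// ... unmap buffers, free the skb ...
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);	// recycle the TxDL
 *	}
 */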
1623 
1624 /**
1625  * vxge_hw_fifo_handle_tcode - Handle transfer code.
1626  * @fifo: Handle to the fifo object used for non offload send
1627  * @txdlh: Descriptor handle.
1628  * @t_code: One of the enumerated (and documented in the Titan user guide)
1629  *          "transfer codes".
1630  *
1631  * Handle descriptor's transfer code. The latter comes with each completed
1632  * descriptor.
1633  *
1634  * Returns: one of the enum vxge_hw_status{} enumerated types.
1635  * VXGE_HW_OK - for success.
1636  * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1637  */
1638 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1639 					      void *txdlh,
1640 					      enum vxge_hw_fifo_tcode t_code)
1641 {
1642 	enum vxge_hw_status status = VXGE_HW_OK;
1643 
1644 	if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1645 		status = VXGE_HW_ERR_INVALID_TCODE;
1646 		goto exit;
1647 	}
1648 
1649 	fifo->stats->txd_t_code_err_cnt[t_code]++;
1650 exit:
1651 	return status;
1652 }
1653 
1654 /**
1655  * vxge_hw_fifo_txdl_free - Free descriptor.
1656  * @fifo: Handle to the fifo object used for non offload send
1657  * @txdlh: Descriptor handle.
1658  *
1659  * Free the reserved descriptor. This operation is "symmetrical" to
1660  * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1661  * lifecycle.
1662  *
1663  * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1664  * be:
1665  *
1666  * - reserved (vxge_hw_fifo_txdl_reserve);
1667  *
1668  * - posted (vxge_hw_fifo_txdl_post);
1669  *
1670  * - completed (vxge_hw_fifo_txdl_next_completed);
1671  *
1672  * - and recycled again (vxge_hw_fifo_txdl_free).
1673  *
1674  * For alternative state transitions and more details please refer to
1675  * the design doc.
1676  *
1677  */
1678 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1679 {
1680 	struct __vxge_hw_channel *channel;
1681 
1682 	channel = &fifo->channel;
1683 
1684 	vxge_hw_channel_dtr_free(channel, txdlh);
1685 }
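
/*
 * Lifecycle sketch (illustrative only): reserve a TxDL, fill it, post it;
 * it later comes back through vxge_hw_fifo_txdl_next_completed() and is
 * recycled with vxge_hw_fifo_txdl_free().  The vxge_hw_fifo_txdl_reserve()
 * and vxge_hw_fifo_txdl_post() prototypes are assumed from vxge-traffic.h;
 * the descriptor-fill step is only indicated by a comment.
 */
static enum vxge_hw_status __maybe_unused
example_txdl_lifecycle(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;
	void *txdl_priv;
	enum vxge_hw_status status;

	/* 1. Reserve a free descriptor */
	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;

	/* 2. Fill it (buffer pointers, lengths, offload hints) ... */

	/* 3. Hand it to the hardware */
	vxge_hw_fifo_txdl_post(fifo, txdlh);

	/* 4. Completion and vxge_hw_fifo_txdl_free() happen later, from
	 * the Tx completion path (see example_drain_tx_completions()). */
	return VXGE_HW_OK;
}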
1686 
1687 /**
1688  * vxge_hw_vpath_mac_addr_add - Add the MAC address entry for this vpath to the MAC address table.
1689  * @vp: Vpath handle.
1690  * @macaddr: MAC address to be added for this vpath into the list
1691  * @macaddr_mask: MAC address mask for macaddr
1692  * @duplicate_mode: Duplicate MAC address add mode. Please see
1693  *             enum vxge_hw_vpath_mac_addr_add_mode{}
1694  *
1695  * Adds the given mac address and mac address mask into the list for this
1696  * vpath.
1697  * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1698  * vxge_hw_vpath_mac_addr_get_next
1699  *
1700  */
1701 enum vxge_hw_status
1702 vxge_hw_vpath_mac_addr_add(
1703 	struct __vxge_hw_vpath_handle *vp,
1704 	u8 *macaddr,
1705 	u8 *macaddr_mask,
1706 	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1707 {
1708 	u32 i;
1709 	u64 data1 = 0ULL;
1710 	u64 data2 = 0ULL;
1711 	enum vxge_hw_status status = VXGE_HW_OK;
1712 
1713 	if (vp == NULL) {
1714 		status = VXGE_HW_ERR_INVALID_HANDLE;
1715 		goto exit;
1716 	}
1717 
1718 	for (i = 0; i < ETH_ALEN; i++) {
1719 		data1 <<= 8;
1720 		data1 |= (u8)macaddr[i];
1721 
1722 		data2 <<= 8;
1723 		data2 |= (u8)macaddr_mask[i];
1724 	}
1725 
1726 	switch (duplicate_mode) {
1727 	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1728 		i = 0;
1729 		break;
1730 	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1731 		i = 1;
1732 		break;
1733 	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1734 		i = 2;
1735 		break;
1736 	default:
1737 		i = 0;
1738 		break;
1739 	}
1740 
1741 	status = __vxge_hw_vpath_rts_table_set(vp,
1742 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1743 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1744 			0,
1745 			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1746 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1747 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1748 exit:
1749 	return status;
1750 }
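
/*
 * Usage sketch (not part of the driver): program a unicast address with an
 * exact-match (all-ones) mask.  The mask, the duplicate mode and the helper
 * name are illustrative assumptions; ether_addr_copy()/eth_broadcast_addr()
 * come from the etherdevice.h header already included by this file.
 */
static enum vxge_hw_status __maybe_unused
example_add_station_mac(struct __vxge_hw_vpath_handle *vp, const u8 *addr)
{
	u8 macaddr[ETH_ALEN];
	u8 macaddr_mask[ETH_ALEN];

	ether_addr_copy(macaddr, addr);
	eth_broadcast_addr(macaddr_mask);	/* all-ones mask: exact match */

	return vxge_hw_vpath_mac_addr_add(vp, macaddr, macaddr_mask,
			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
}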
1751 
1752 /**
1753  * vxge_hw_vpath_mac_addr_get - Get the first mac address entry
1754  * @vp: Vpath handle.
1755  * @macaddr: First MAC address entry for this vpath in the list
1756  * @macaddr_mask: MAC address mask for macaddr
1757  *
1758  * Get the first MAC address entry for this vpath from the MAC address table.
1759  * Returns: the first MAC address and MAC address mask in the list for this
1760  * vpath, copied into @macaddr and @macaddr_mask.
1761  * see also: vxge_hw_vpath_mac_addr_get_next
1762  *
1763  */
1764 enum vxge_hw_status
1765 vxge_hw_vpath_mac_addr_get(
1766 	struct __vxge_hw_vpath_handle *vp,
1767 	u8 *macaddr,
1768 	u8 *macaddr_mask)
1769 {
1770 	u32 i;
1771 	u64 data1 = 0ULL;
1772 	u64 data2 = 0ULL;
1773 	enum vxge_hw_status status = VXGE_HW_OK;
1774 
1775 	if (vp == NULL) {
1776 		status = VXGE_HW_ERR_INVALID_HANDLE;
1777 		goto exit;
1778 	}
1779 
1780 	status = __vxge_hw_vpath_rts_table_get(vp,
1781 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1782 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1783 			0, &data1, &data2);
1784 
1785 	if (status != VXGE_HW_OK)
1786 		goto exit;
1787 
1788 	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1789 
1790 	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1791 
1792 	for (i = ETH_ALEN; i > 0; i--) {
1793 		macaddr[i-1] = (u8)(data1 & 0xFF);
1794 		data1 >>= 8;
1795 
1796 		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1797 		data2 >>= 8;
1798 	}
1799 exit:
1800 	return status;
1801 }
1802 
1803 /**
1804  * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry
1805  * @vp: Vpath handle.
1806  * @macaddr: Next MAC address entry for this vpath in the list
1807  * @macaddr_mask: MAC address mask for macaddr
1808  *
1809  * Get the next MAC address entry for this vpath from the MAC address table.
1810  * Returns: the next MAC address and MAC address mask in the list for this
1811  * vpath, copied into @macaddr and @macaddr_mask.
1812  * see also: vxge_hw_vpath_mac_addr_get
1813  *
1814  */
1815 enum vxge_hw_status
1816 vxge_hw_vpath_mac_addr_get_next(
1817 	struct __vxge_hw_vpath_handle *vp,
1818 	u8 *macaddr,
1819 	u8 *macaddr_mask)
1820 {
1821 	u32 i;
1822 	u64 data1 = 0ULL;
1823 	u64 data2 = 0ULL;
1824 	enum vxge_hw_status status = VXGE_HW_OK;
1825 
1826 	if (vp == NULL) {
1827 		status = VXGE_HW_ERR_INVALID_HANDLE;
1828 		goto exit;
1829 	}
1830 
1831 	status = __vxge_hw_vpath_rts_table_get(vp,
1832 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1833 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1834 			0, &data1, &data2);
1835 
1836 	if (status != VXGE_HW_OK)
1837 		goto exit;
1838 
1839 	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1840 
1841 	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1842 
1843 	for (i = ETH_ALEN; i > 0; i--) {
1844 		macaddr[i-1] = (u8)(data1 & 0xFF);
1845 		data1 >>= 8;
1846 
1847 		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1848 		data2 >>= 8;
1849 	}
1850 
1851 exit:
1852 	return status;
1853 }
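
/*
 * Illustrative sketch: walk the whole DA table for a vpath by fetching the
 * first entry and then iterating with the _get_next() variant until the
 * hardware stops returning VXGE_HW_OK.  The consumer of each entry is left
 * as a comment.
 */
static void __maybe_unused
example_dump_mac_table(struct __vxge_hw_vpath_handle *vp)
{
	u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_get(vp, macaddr, macaddr_mask);
	while (status == VXGE_HW_OK) {
		/* ... consume macaddr/macaddr_mask (e.g. log them) ... */
		status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr,
							 macaddr_mask);
	}
}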
1854 
1855 /**
1856  * vxge_hw_vpath_mac_addr_delete - Delete the MAC address entry for this vpath from the MAC address table.
1857  * @vp: Vpath handle.
1858  * @macaddr: MAC address to be deleted from the list for this vpath
1859  * @macaddr_mask: MAC address mask for macaddr
1860  *
1861  * Deletes the given MAC address and MAC address mask from the list for this
1862  * vpath.
1863  * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1864  * vxge_hw_vpath_mac_addr_get_next
1865  *
1866  */
1867 enum vxge_hw_status
1868 vxge_hw_vpath_mac_addr_delete(
1869 	struct __vxge_hw_vpath_handle *vp,
1870 	u8 *macaddr,
1871 	u8 *macaddr_mask)
1872 {
1873 	u32 i;
1874 	u64 data1 = 0ULL;
1875 	u64 data2 = 0ULL;
1876 	enum vxge_hw_status status = VXGE_HW_OK;
1877 
1878 	if (vp == NULL) {
1879 		status = VXGE_HW_ERR_INVALID_HANDLE;
1880 		goto exit;
1881 	}
1882 
1883 	for (i = 0; i < ETH_ALEN; i++) {
1884 		data1 <<= 8;
1885 		data1 |= (u8)macaddr[i];
1886 
1887 		data2 <<= 8;
1888 		data2 |= (u8)macaddr_mask[i];
1889 	}
1890 
1891 	status = __vxge_hw_vpath_rts_table_set(vp,
1892 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1893 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1894 			0,
1895 			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1896 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1897 exit:
1898 	return status;
1899 }
1900 
1901 /**
1902  * vxge_hw_vpath_vid_add - Add the VLAN ID entry for this vpath to the VLAN ID table.
1903  * @vp: Vpath handle.
1904  * @vid: VLAN ID to be added to the list for this vpath
1905  *
1906  * Adds the given VLAN ID into the list for this vpath.
1907  * see also: vxge_hw_vpath_vid_delete
1908  *
1909  */
1910 enum vxge_hw_status
1911 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1912 {
1913 	enum vxge_hw_status status = VXGE_HW_OK;
1914 
1915 	if (vp == NULL) {
1916 		status = VXGE_HW_ERR_INVALID_HANDLE;
1917 		goto exit;
1918 	}
1919 
1920 	status = __vxge_hw_vpath_rts_table_set(vp,
1921 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1922 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1923 			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1924 exit:
1925 	return status;
1926 }
1927 
1928 /**
1929  * vxge_hw_vpath_vid_delete - Delete the VLAN ID entry for this vpath
1930  *               from the VLAN ID table.
1931  * @vp: Vpath handle.
1932  * @vid: VLAN ID to be deleted from the list for this vpath
1933  *
1934  * Deletes the given VLAN ID from the list for this vpath.
1935  * see also: vxge_hw_vpath_vid_add
1936  *
1937  */
1938 enum vxge_hw_status
1939 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1940 {
1941 	enum vxge_hw_status status = VXGE_HW_OK;
1942 
1943 	if (vp == NULL) {
1944 		status = VXGE_HW_ERR_INVALID_HANDLE;
1945 		goto exit;
1946 	}
1947 
1948 	status = __vxge_hw_vpath_rts_table_set(vp,
1949 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1950 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1951 			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1952 exit:
1953 	return status;
1954 }
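
/*
 * Usage sketch (illustrative): mirror a VLAN filter add/remove request onto
 * the vpath VLAN ID table.  The wrapper name and the bool parameter are
 * assumptions for illustration.
 */
static enum vxge_hw_status __maybe_unused
example_update_vlan_filter(struct __vxge_hw_vpath_handle *vp, u64 vid,
			   bool add)
{
	return add ? vxge_hw_vpath_vid_add(vp, vid) :
		     vxge_hw_vpath_vid_delete(vp, vid);
}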
1955 
1956 /**
1957  * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1958  * @vp: Vpath handle.
1959  *
1960  * Enable promiscuous mode of Titan-e operation.
1961  *
1962  * See also: vxge_hw_vpath_promisc_disable().
1963  */
1964 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1965 			struct __vxge_hw_vpath_handle *vp)
1966 {
1967 	u64 val64;
1968 	struct __vxge_hw_virtualpath *vpath;
1969 	enum vxge_hw_status status = VXGE_HW_OK;
1970 
1971 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1972 		status = VXGE_HW_ERR_INVALID_HANDLE;
1973 		goto exit;
1974 	}
1975 
1976 	vpath = vp->vpath;
1977 
1978 	/* Enable promiscuous mode for function 0 only */
1979 	if (!(vpath->hldev->access_rights &
1980 		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1981 		return VXGE_HW_OK;
1982 
1983 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1984 
1985 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1986 
1987 		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1988 			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1989 			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1990 			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1991 
1992 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1993 	}
1994 exit:
1995 	return status;
1996 }
1997 
1998 /**
1999  * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2000  * @vp: Vpath handle.
2001  *
2002  * Disable promiscuous mode of Titan-e operation.
2003  *
2004  * See also: vxge_hw_vpath_promisc_enable().
2005  */
2006 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2007 			struct __vxge_hw_vpath_handle *vp)
2008 {
2009 	u64 val64;
2010 	struct __vxge_hw_virtualpath *vpath;
2011 	enum vxge_hw_status status = VXGE_HW_OK;
2012 
2013 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2014 		status = VXGE_HW_ERR_INVALID_HANDLE;
2015 		goto exit;
2016 	}
2017 
2018 	vpath = vp->vpath;
2019 
2020 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2021 
2022 	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2023 
2024 		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2025 			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2026 			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2027 
2028 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2029 	}
2030 exit:
2031 	return status;
2032 }
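
/*
 * Illustrative sketch: toggle promiscuous mode from a (hypothetical)
 * rx-mode handler.  As the code above shows, the enable path silently
 * succeeds on functions without MRPCIM access rights.
 */
static enum vxge_hw_status __maybe_unused
example_set_promisc(struct __vxge_hw_vpath_handle *vp, bool on)
{
	return on ? vxge_hw_vpath_promisc_enable(vp) :
		    vxge_hw_vpath_promisc_disable(vp);
}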
2033 
2034 /*
2035  * vxge_hw_vpath_bcast_enable - Enable broadcast
2036  * @vp: Vpath handle.
2037  *
2038  * Enable receiving broadcasts.
2039  */
2040 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2041 			struct __vxge_hw_vpath_handle *vp)
2042 {
2043 	u64 val64;
2044 	struct __vxge_hw_virtualpath *vpath;
2045 	enum vxge_hw_status status = VXGE_HW_OK;
2046 
2047 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2048 		status = VXGE_HW_ERR_INVALID_HANDLE;
2049 		goto exit;
2050 	}
2051 
2052 	vpath = vp->vpath;
2053 
2054 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2055 
2056 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2057 		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2058 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2059 	}
2060 exit:
2061 	return status;
2062 }
2063 
2064 /**
2065  * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2066  * @vp: Vpath handle.
2067  *
2068  * Enable Titan-e multicast addresses.
2069  * Returns: VXGE_HW_OK on success.
2070  *
2071  */
2072 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2073 			struct __vxge_hw_vpath_handle *vp)
2074 {
2075 	u64 val64;
2076 	struct __vxge_hw_virtualpath *vpath;
2077 	enum vxge_hw_status status = VXGE_HW_OK;
2078 
2079 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2080 		status = VXGE_HW_ERR_INVALID_HANDLE;
2081 		goto exit;
2082 	}
2083 
2084 	vpath = vp->vpath;
2085 
2086 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2087 
2088 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2089 		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2090 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2091 	}
2092 exit:
2093 	return status;
2094 }
2095 
2096 /**
2097  * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
2098  * @vp: Vpath handle.
2099  *
2100  * Disable Titan-e multicast addresses.
2101  * Returns: VXGE_HW_OK - success.
2102  * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2103  *
2104  */
2105 enum vxge_hw_status
2106 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2107 {
2108 	u64 val64;
2109 	struct __vxge_hw_virtualpath *vpath;
2110 	enum vxge_hw_status status = VXGE_HW_OK;
2111 
2112 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2113 		status = VXGE_HW_ERR_INVALID_HANDLE;
2114 		goto exit;
2115 	}
2116 
2117 	vpath = vp->vpath;
2118 
2119 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2120 
2121 	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2122 		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2123 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2124 	}
2125 exit:
2126 	return status;
2127 }
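
/*
 * Illustrative sketch of typical rx-mode handling: broadcasts are always
 * accepted, while "accept all multicast" follows a flag supplied by the
 * caller.  The helper name and the all_multi parameter are assumptions.
 */
static enum vxge_hw_status __maybe_unused
example_set_rx_mode(struct __vxge_hw_vpath_handle *vp, bool all_multi)
{
	enum vxge_hw_status status;

	status = vxge_hw_vpath_bcast_enable(vp);
	if (status != VXGE_HW_OK)
		return status;

	return all_multi ? vxge_hw_vpath_mcast_enable(vp) :
			   vxge_hw_vpath_mcast_disable(vp);
}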
2128 
2129 /*
2130  * vxge_hw_vpath_alarm_process - Process Alarms.
2131  * @vp: Virtual Path handle.
2132  * @skip_alarms: When set, do not clear the alarms after processing them
2133  *
2134  * Process vpath alarms.
2135  *
2136  */
2137 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2138 			struct __vxge_hw_vpath_handle *vp,
2139 			u32 skip_alarms)
2140 {
2141 	enum vxge_hw_status status = VXGE_HW_OK;
2142 
2143 	if (vp == NULL) {
2144 		status = VXGE_HW_ERR_INVALID_HANDLE;
2145 		goto exit;
2146 	}
2147 
2148 	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2149 exit:
2150 	return status;
2151 }
2152 
2153 /**
2154  * vxge_hw_vpath_msix_set - Associate MSI-X vectors with TIM interrupts and
2155  *                            alarms
2156  * @vp: Virtual Path handle.
2157  * @tim_msix_id: MSI-X vectors associated with VXGE_HW_MAX_INTR_PER_VP
2158  *             interrupts (entries may repeat). If the fifo or ring is not
2159  *             enabled, the MSI-X vector for it should be set to 0.
2160  * @alarm_msix_id: MSI-X vector for the alarm interrupt.
2161  *
2162  * This API associates the given MSI-X vector numbers with the four TIM
2163  * interrupts and the alarm interrupt.
2164  */
2165 void
2166 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2167 		       int alarm_msix_id)
2168 {
2169 	u64 val64;
2170 	struct __vxge_hw_virtualpath *vpath = vp->vpath;
2171 	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2172 	u32 vp_id = vp->vpath->vp_id;
2173 
2174 	val64 =  VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2175 		  (vp_id * 4) + tim_msix_id[0]) |
2176 		 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2177 		  (vp_id * 4) + tim_msix_id[1]);
2178 
2179 	writeq(val64, &vp_reg->interrupt_cfg0);
2180 
2181 	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2182 			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2183 			&vp_reg->interrupt_cfg2);
2184 
2185 	if (vpath->hldev->config.intr_mode ==
2186 					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2187 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2188 				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2189 				0, 32), &vp_reg->one_shot_vect0_en);
2190 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2191 				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2192 				0, 32), &vp_reg->one_shot_vect1_en);
2193 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2194 				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2195 				0, 32), &vp_reg->one_shot_vect2_en);
2196 	}
2197 }
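
/*
 * Illustrative sketch: map local vector numbers for one vpath.  The layout
 * chosen here (vector 0 for the Tx TIM, vector 1 for the Rx TIM, unused TIM
 * slots left at 0, alarm on vector 2) is an assumption for illustration,
 * not the driver's actual vector assignment.
 */
static void __maybe_unused
example_map_msix(struct __vxge_hw_vpath_handle *vp)
{
	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0, 1, 0, 0};
	int alarm_msix_id = 2;

	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
}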
2198 
2199 /**
2200  * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2201  * @vp: Virtual Path handle.
2202  * @msix_id: MSI-X ID
2203  *
2204  * The function masks the MSI-X interrupt for the given msix_id.
2205  *
2206  * See also: vxge_hw_vpath_msix_unmask()
2207  */
2211 void
2212 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2213 {
2214 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2215 	__vxge_hw_pio_mem_write32_upper(
2216 		(u32) vxge_bVALn(vxge_mBIT(msix_id  >> 2), 0, 32),
2217 		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2218 }
2219 
2220 /**
2221  * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2222  * @vp: Virtual Path handle.
2223  * @msix_id: MSI-X ID
2224  *
2225  * The function clears the MSI-X interrupt for the given msix_id,
2226  * re-arming the vector when running in one-shot mode.
2227  *
2228  * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_unmask()
2229  */
2232 void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2233 {
2234 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2235 
2236 	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2237 		__vxge_hw_pio_mem_write32_upper(
2238 			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2239 			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2240 	else
2241 		__vxge_hw_pio_mem_write32_upper(
2242 			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2243 			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2244 }
2245 
2246 /**
2247  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2248  * @vp: Virtual Path handle.
2249  * @msix_id: MSI-X ID
2250  *
2251  * The function unmasks the MSI-X interrupt for the given msix_id.
2252  *
2253  * See also: vxge_hw_vpath_msix_mask()
2254  */
2258 void
2259 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2260 {
2261 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2262 	__vxge_hw_pio_mem_write32_upper(
2263 			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2264 			&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2265 }
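
/*
 * Illustrative sketch of a per-vector interrupt body: mask the vector,
 * handle the event, then clear it.  As the code above shows, the clear
 * either re-arms the one-shot vector or writes the clear-mask register,
 * so no separate unmask call is needed in this sketch.
 */
static void __maybe_unused
example_msix_vector_body(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	vxge_hw_vpath_msix_mask(vp, msix_id);

	/* ... process the Tx/Rx/alarm event behind this vector ... */

	vxge_hw_vpath_msix_clear(vp, msix_id);
}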
2266 
2267 /**
2268  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2269  * @vp: Virtual Path handle.
2270  *
2271  * Mask Tx and Rx vpath interrupts.
2272  *
2273  * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2274  */
2275 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2276 {
2277 	u64	tim_int_mask0[4] = {[0 ...3] = 0};
2278 	u32	tim_int_mask1[4] = {[0 ...3] = 0};
2279 	u64	val64;
2280 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2281 
2282 	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2283 		tim_int_mask1, vp->vpath->vp_id);
2284 
2285 	val64 = readq(&hldev->common_reg->tim_int_mask0);
2286 
2287 	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2288 		(tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2289 		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2290 			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2291 			&hldev->common_reg->tim_int_mask0);
2292 	}
2293 
2294 	val64 = readl(&hldev->common_reg->tim_int_mask1);
2295 
2296 	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2297 		(tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2298 		__vxge_hw_pio_mem_write32_upper(
2299 			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2300 			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2301 			&hldev->common_reg->tim_int_mask1);
2302 	}
2303 }
2304 
2305 /**
2306  * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2307  * @vp: Virtual Path handle.
2308  *
2309  * Unmask Tx and Rx vpath interrupts.
2310  *
2311  * See also: vxge_hw_vpath_inta_mask_tx_rx()
2312  */
2313 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2314 {
2315 	u64	tim_int_mask0[4] = {[0 ...3] = 0};
2316 	u32	tim_int_mask1[4] = {[0 ...3] = 0};
2317 	u64	val64;
2318 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2319 
2320 	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2321 		tim_int_mask1, vp->vpath->vp_id);
2322 
2323 	val64 = readq(&hldev->common_reg->tim_int_mask0);
2324 
2325 	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2326 	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2327 		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2328 			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2329 			&hldev->common_reg->tim_int_mask0);
2330 	}
2331 
2332 	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2333 	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2334 		__vxge_hw_pio_mem_write32_upper(
2335 			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2336 			  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2337 			&hldev->common_reg->tim_int_mask1);
2338 	}
2339 }
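
/*
 * Illustrative sketch: bracket INTA-mode Tx/Rx processing for one vpath
 * with the mask/unmask pair defined above.  The processing step itself is
 * only indicated by a comment.
 */
static void __maybe_unused
example_inta_poll_vpath(struct __vxge_hw_vpath_handle *vp)
{
	vxge_hw_vpath_inta_mask_tx_rx(vp);

	/* ... run the Tx/Rx completion handlers for this vpath ... */

	vxge_hw_vpath_inta_unmask_tx_rx(vp);
}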
2340 
2341 /**
2342  * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2343  * descriptors and process them.
2344  * @ring: Handle to the ring object used for receive
2345  *
2346  * The function polls the Rx for the completed descriptors and calls
2347  * the driver via the supplied completion callback.
2348  *
2349  * Returns: VXGE_HW_OK, if the polling completed successfully.
2350  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2351  * descriptors available which are yet to be processed.
2352  *
2353  * See also: vxge_hw_vpath_poll_tx()
2354  */
2355 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2356 {
2357 	u8 t_code;
2358 	enum vxge_hw_status status = VXGE_HW_OK;
2359 	void *first_rxdh;
2360 	int new_count = 0;
2361 
2362 	ring->cmpl_cnt = 0;
2363 
2364 	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2365 	if (status == VXGE_HW_OK)
2366 		ring->callback(ring, first_rxdh,
2367 			t_code, ring->channel.userdata);
2368 
2369 	if (ring->cmpl_cnt != 0) {
2370 		ring->doorbell_cnt += ring->cmpl_cnt;
2371 		if (ring->doorbell_cnt >= ring->rxds_limit) {
2372 			/*
2373 			 * Each RxD is of 4 qwords, update the number of
2374 			 * qwords replenished
2375 			 */
2376 			new_count = (ring->doorbell_cnt * 4);
2377 
2378 			/* For each block add 4 more qwords */
2379 			ring->total_db_cnt += ring->doorbell_cnt;
2380 			if (ring->total_db_cnt >= ring->rxds_per_block) {
2381 				new_count += 4;
2382 				/* Reset total count */
2383 				ring->total_db_cnt %= ring->rxds_per_block;
2384 			}
2385 			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2386 				&ring->vp_reg->prc_rxd_doorbell);
2387 			readl(&ring->common_reg->titan_general_int_status);
2388 			ring->doorbell_cnt = 0;
2389 		}
2390 	}
2391 
2392 	return status;
2393 }
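
/*
 * Illustrative sketch: drive Rx completion processing for one ring from a
 * poll routine.  The ring's registered completion callback does the real
 * per-packet work inside vxge_hw_vpath_poll_rx(); treating every status
 * other than VXGE_HW_OK as "nothing more to do right now" is a simplifying
 * assumption of this sketch.
 */
static bool __maybe_unused
example_rx_poll_once(struct __vxge_hw_ring *ring)
{
	/* true when the poll consumed completions cleanly */
	return vxge_hw_vpath_poll_rx(ring) == VXGE_HW_OK;
}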
2394 
2395 /**
2396  * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process them.
2397  * @fifo: Handle to the fifo object used for non offload send
2398  * @skb_ptr: pointer to skb
2399  * @nr_skb: number of skbs
2400  * @more: more is coming
2401  *
2402  * The function polls the Tx for the completed descriptors and calls
2403  * the driver via the supplied completion callback.
2404  *
2405  * Returns: VXGE_HW_OK, if the polling completed successfully.
2406  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2407  * descriptors available which are yet to be processed.
2408  */
2409 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2410 					struct sk_buff ***skb_ptr, int nr_skb,
2411 					int *more)
2412 {
2413 	enum vxge_hw_fifo_tcode t_code;
2414 	void *first_txdlh;
2415 	enum vxge_hw_status status = VXGE_HW_OK;
2416 	struct __vxge_hw_channel *channel;
2417 
2418 	channel = &fifo->channel;
2419 
2420 	status = vxge_hw_fifo_txdl_next_completed(fifo,
2421 				&first_txdlh, &t_code);
2422 	if (status == VXGE_HW_OK)
2423 		if (fifo->callback(fifo, first_txdlh, t_code,
2424 			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2425 			status = VXGE_HW_COMPLETIONS_REMAIN;
2426 
2427 	return status;
2428 }
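
/*
 * Illustrative sketch only: consume Tx completions in small batches and
 * free the returned skbs.  It assumes the registered fifo callback stores
 * completed skbs into the supplied array, advances the cursor past them and
 * sets *more when a batch boundary was hit; the batch size of 16 and the
 * freeing policy are assumptions for illustration.
 */
static void __maybe_unused
example_tx_poll(struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *completed[16];
	struct sk_buff **skb_ptr, **tmp;
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		vxge_hw_vpath_poll_tx(fifo, &skb_ptr, 16, &more);

		/* Free every skb the callback handed back in this batch */
		for (tmp = completed; tmp != skb_ptr; tmp++)
			dev_kfree_skb_irq(*tmp);
	} while (more);
}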
2429