1 /******************************************************************************
2  * This software may be used and distributed according to the terms of
3  * the GNU General Public License (GPL), incorporated herein by reference.
4  * Drivers based on or derived from this code fall under the GPL and must
5  * retain the authorship, copyright and license notice.  This file is not
6  * a complete program and may only be used when the entire operating
7  * system is licensed under the GPL.
8  * See the file COPYING in this distribution for more information.
9  *
10  * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11  *                Virtualized Server Adapter.
12  * Copyright(c) 2002-2010 Exar Corp.
13  ******************************************************************************/
14 #include <linux/vmalloc.h>
15 #include <linux/etherdevice.h>
16 #include <linux/pci.h>
17 #include <linux/pci_hotplug.h>
18 #include <linux/slab.h>
19 
20 #include "vxge-traffic.h"
21 #include "vxge-config.h"
22 #include "vxge-main.h"
23 
24 #define VXGE_HW_VPATH_STATS_PIO_READ(offset) {				\
25 	status = __vxge_hw_vpath_stats_access(vpath,			\
26 					      VXGE_HW_STATS_OP_READ,	\
27 					      offset,			\
28 					      &val64);			\
29 	if (status != VXGE_HW_OK)					\
30 		return status;						\
31 }
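/*
 * Note: VXGE_HW_VPATH_STATS_PIO_READ is only usable inside a function that
 * already has local 'status', 'vpath' and 'val64' variables in scope; it
 * performs one stats read and makes the caller return early on any error.
 */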
32 
33 static void
34 vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
35 {
36 	u64 val64;
37 
38 	val64 = readq(&vp_reg->rxmac_vcfg0);
39 	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
40 	writeq(val64, &vp_reg->rxmac_vcfg0);
41 	val64 = readq(&vp_reg->rxmac_vcfg0);
42 }
43 
44 /*
45  * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
46  */
47 int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
48 {
49 	struct vxge_hw_vpath_reg __iomem *vp_reg;
50 	struct __vxge_hw_virtualpath *vpath;
51 	u64 val64, rxd_count, rxd_spat;
52 	int count = 0, total_count = 0;
53 
54 	vpath = &hldev->virtual_paths[vp_id];
55 	vp_reg = vpath->vp_reg;
56 
57 	vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
58 
59 	/* Check that the ring controller for this vpath has enough free RxDs
60 	 * to send frames to the host.  This is done by reading the
61 	 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
62 	 * RXD_SPAT value for the vpath.
63 	 */
64 	val64 = readq(&vp_reg->prc_cfg6);
65 	rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
66 	/* Use a factor of 2 when comparing rxd_count against rxd_spat for some
67 	 * leg room.
68 	 */
69 	rxd_spat *= 2;
70 
71 	do {
72 		mdelay(1);
73 
74 		rxd_count = readq(&vp_reg->prc_rxd_doorbell);
75 
76 		/* Check that the ring controller for this vpath does
77 		 * not have any frame in its pipeline.
78 		 */
79 		val64 = readq(&vp_reg->frm_in_progress_cnt);
80 		if ((rxd_count <= rxd_spat) || (val64 > 0))
81 			count = 0;
82 		else
83 			count++;
84 		total_count++;
85 	} while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
86 			(total_count < VXGE_HW_MAX_POLLING_COUNT));
87 
88 	if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
89 		printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
90 			__func__);
91 
92 	return total_count;
93 }
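/*
 * Note: the value returned above is the number of 1 ms polling iterations
 * consumed, not a vxge_hw_status code.  vxge_hw_device_wait_receive_idle()
 * below adds these up across vpaths and stops once the total reaches
 * VXGE_HW_MAX_POLLING_COUNT.
 */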
94 
95 /* vxge_hw_device_wait_receive_idle - This function waits until all frames
96  * stored in the frame buffer for each vpath assigned to the given
97  * function (hldev) have been sent to the host.
98  */
99 void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
100 {
101 	int i, total_count = 0;
102 
103 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
104 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
105 			continue;
106 
107 		total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
108 		if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
109 			break;
110 	}
111 }
112 
113 /*
114  * __vxge_hw_device_register_poll
115  * Will poll a given register for up to the specified amount of time.
116  * Returns VXGE_HW_OK as soon as the masked bits read back as cleared.
117  */
118 static enum vxge_hw_status
119 __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
120 {
121 	u64 val64;
122 	u32 i = 0;
123 	enum vxge_hw_status ret = VXGE_HW_FAIL;
124 
125 	udelay(10);
126 
127 	do {
128 		val64 = readq(reg);
129 		if (!(val64 & mask))
130 			return VXGE_HW_OK;
131 		udelay(100);
132 	} while (++i <= 9);
133 
134 	i = 0;
135 	do {
136 		val64 = readq(reg);
137 		if (!(val64 & mask))
138 			return VXGE_HW_OK;
139 		mdelay(1);
140 	} while (++i <= max_millis);
141 
142 	return ret;
143 }
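/*
 * Note: the poll above runs in two phases - roughly 1 ms of 100 us
 * busy-waits, then 1 ms busy-waits (mdelay) for up to 'max_millis'
 * iterations - and returns VXGE_HW_OK as soon as the masked bits read
 * back as zero.
 */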
144 
145 static inline enum vxge_hw_status
146 __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
147 			  u64 mask, u32 max_millis)
148 {
149 	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
150 	wmb();
151 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
152 	wmb();
153 
154 	return __vxge_hw_device_register_poll(addr, mask, max_millis);
155 }
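/*
 * Note: the helper above splits the 64-bit value into two 32-bit PIO writes
 * separated by write barriers, then reuses __vxge_hw_device_register_poll()
 * to wait for the bit(s) selected by 'mask' to clear.
 */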
156 
157 static enum vxge_hw_status
158 vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
159 		     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
160 		     u64 *steer_ctrl)
161 {
162 	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
163 	enum vxge_hw_status status;
164 	u64 val64;
165 	u32 retry = 0, max_retry = 3;
166 
167 	spin_lock(&vpath->lock);
168 	if (!vpath->vp_open) {
169 		spin_unlock(&vpath->lock);
170 		max_retry = 100;
171 	}
172 
173 	writeq(*data0, &vp_reg->rts_access_steer_data0);
174 	writeq(*data1, &vp_reg->rts_access_steer_data1);
175 	wmb();
176 
177 	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
178 		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
179 		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
180 		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
181 		*steer_ctrl;
182 
183 	status = __vxge_hw_pio_mem_write64(val64,
184 					   &vp_reg->rts_access_steer_ctrl,
185 					   VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
186 					   VXGE_HW_DEF_DEVICE_POLL_MILLIS);
187 
188 	/* The __vxge_hw_device_register_poll can udelay for a significant
189 	 * amount of time, blocking other processes from the CPU.  If it delays
190 	 * for ~5 secs, an NMI error can occur.  A way around this is to give up
191 	 * the processor via msleep, but that is not allowed while under lock.
192 	 * So, only allow it to sleep for ~4 secs if the vpath is open.  Otherwise,
193 	 * delay for 1 sec and sleep for 10 ms until the firmware operation has
194 	 * completed or timed out.
195 	 */
196 	while ((status != VXGE_HW_OK) && retry++ < max_retry) {
197 		if (!vpath->vp_open)
198 			msleep(20);
199 		status = __vxge_hw_device_register_poll(
200 					&vp_reg->rts_access_steer_ctrl,
201 					VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
202 					VXGE_HW_DEF_DEVICE_POLL_MILLIS);
203 	}
204 
205 	if (status != VXGE_HW_OK)
206 		goto out;
207 
208 	val64 = readq(&vp_reg->rts_access_steer_ctrl);
209 	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
210 		*data0 = readq(&vp_reg->rts_access_steer_data0);
211 		*data1 = readq(&vp_reg->rts_access_steer_data1);
212 		*steer_ctrl = val64;
213 	} else
214 		status = VXGE_HW_FAIL;
215 
216 out:
217 	if (vpath->vp_open)
218 		spin_unlock(&vpath->lock);
219 	return status;
220 }
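/*
 * Typical usage of vxge_hw_vpath_fw_api(), as seen in the callers below:
 * preload data0/data1 (often just zero), zero steer_ctrl, pass their
 * addresses together with an action/memo/offset triple, and on VXGE_HW_OK
 * read the firmware's reply back out of data0, data1 and steer_ctrl.
 */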
221 
222 enum vxge_hw_status
223 vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
224 			     u32 *minor, u32 *build)
225 {
226 	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
227 	struct __vxge_hw_virtualpath *vpath;
228 	enum vxge_hw_status status;
229 
230 	vpath = &hldev->virtual_paths[hldev->first_vp_id];
231 
232 	status = vxge_hw_vpath_fw_api(vpath,
233 				      VXGE_HW_FW_UPGRADE_ACTION,
234 				      VXGE_HW_FW_UPGRADE_MEMO,
235 				      VXGE_HW_FW_UPGRADE_OFFSET_READ,
236 				      &data0, &data1, &steer_ctrl);
237 	if (status != VXGE_HW_OK)
238 		return status;
239 
240 	*major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
241 	*minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
242 	*build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
243 
244 	return status;
245 }
246 
247 enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
248 {
249 	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
250 	struct __vxge_hw_virtualpath *vpath;
251 	enum vxge_hw_status status;
252 	u32 ret;
253 
254 	vpath = &hldev->virtual_paths[hldev->first_vp_id];
255 
256 	status = vxge_hw_vpath_fw_api(vpath,
257 				      VXGE_HW_FW_UPGRADE_ACTION,
258 				      VXGE_HW_FW_UPGRADE_MEMO,
259 				      VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
260 				      &data0, &data1, &steer_ctrl);
261 	if (status != VXGE_HW_OK) {
262 		vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
263 		goto exit;
264 	}
265 
266 	ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
267 	if (ret != 1) {
268 		vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
269 				__func__, ret);
270 		status = VXGE_HW_FAIL;
271 	}
272 
273 exit:
274 	return status;
275 }
276 
277 enum vxge_hw_status
278 vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
279 {
280 	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
281 	struct __vxge_hw_virtualpath *vpath;
282 	enum vxge_hw_status status;
283 	int ret_code, sec_code;
284 
285 	vpath = &hldev->virtual_paths[hldev->first_vp_id];
286 
287 	/* send upgrade start command */
288 	status = vxge_hw_vpath_fw_api(vpath,
289 				      VXGE_HW_FW_UPGRADE_ACTION,
290 				      VXGE_HW_FW_UPGRADE_MEMO,
291 				      VXGE_HW_FW_UPGRADE_OFFSET_START,
292 				      &data0, &data1, &steer_ctrl);
293 	if (status != VXGE_HW_OK) {
294 		vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
295 				__func__);
296 		return status;
297 	}
298 
299 	/* Transfer fw image to adapter 16 bytes at a time */
300 	for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
301 		steer_ctrl = 0;
302 
303 		/* The next 128bits of fwdata to be loaded onto the adapter */
304 		data0 = *((u64 *)fwdata);
305 		data1 = *((u64 *)fwdata + 1);
306 
307 		status = vxge_hw_vpath_fw_api(vpath,
308 					      VXGE_HW_FW_UPGRADE_ACTION,
309 					      VXGE_HW_FW_UPGRADE_MEMO,
310 					      VXGE_HW_FW_UPGRADE_OFFSET_SEND,
311 					      &data0, &data1, &steer_ctrl);
312 		if (status != VXGE_HW_OK) {
313 			vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
314 					__func__);
315 			goto out;
316 		}
317 
318 		ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
319 		switch (ret_code) {
320 		case VXGE_HW_FW_UPGRADE_OK:
321 			/* All OK, send next 16 bytes. */
322 			break;
323 		case VXGE_FW_UPGRADE_BYTES2SKIP:
324 			/* skip bytes in the stream */
325 			fwdata += (data0 >> 8) & 0xFFFFFFFF;
326 			break;
327 		case VXGE_HW_FW_UPGRADE_DONE:
328 			goto out;
329 		case VXGE_HW_FW_UPGRADE_ERR:
330 			sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
331 			switch (sec_code) {
332 			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
333 			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
334 				printk(KERN_ERR
335 				       "corrupted data from .ncf file\n");
336 				break;
337 			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
338 			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
339 			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
340 			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
341 			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
342 				printk(KERN_ERR "invalid .ncf file\n");
343 				break;
344 			case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
345 				printk(KERN_ERR "buffer overflow\n");
346 				break;
347 			case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
348 				printk(KERN_ERR "failed to flash the image\n");
349 				break;
350 			case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
351 				printk(KERN_ERR
352 				       "generic error. Unknown error type\n");
353 				break;
354 			default:
355 				printk(KERN_ERR "Unknown error of type %d\n",
356 				       sec_code);
357 				break;
358 			}
359 			status = VXGE_HW_FAIL;
360 			goto out;
361 		default:
362 			printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
363 			status = VXGE_HW_FAIL;
364 			goto out;
365 		}
366 		/* point to next 16 bytes */
367 		fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
368 	}
369 out:
370 	return status;
371 }
372 
373 enum vxge_hw_status
374 vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
375 				struct eprom_image *img)
376 {
377 	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
378 	struct __vxge_hw_virtualpath *vpath;
379 	enum vxge_hw_status status;
380 	int i;
381 
382 	vpath = &hldev->virtual_paths[hldev->first_vp_id];
383 
384 	for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
385 		data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
386 		data1 = steer_ctrl = 0;
387 
388 		status = vxge_hw_vpath_fw_api(vpath,
389 			VXGE_HW_FW_API_GET_EPROM_REV,
390 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
391 			0, &data0, &data1, &steer_ctrl);
392 		if (status != VXGE_HW_OK)
393 			break;
394 
395 		img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
396 		img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
397 		img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
398 		img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
399 	}
400 
401 	return status;
402 }
403 
404 /*
405  * __vxge_hw_channel_free - Free memory allocated for channel
406  * This function deallocates memory from the channel and various arrays
407  * in the channel
408  */
409 static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
410 {
411 	kfree(channel->work_arr);
412 	kfree(channel->free_arr);
413 	kfree(channel->reserve_arr);
414 	kfree(channel->orig_arr);
415 	kfree(channel);
416 }
417 
418 /*
419  * __vxge_hw_channel_initialize - Initialize a channel
420  * This function initializes a channel by properly setting the
421  * various references
422  */
423 static enum vxge_hw_status
424 __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
425 {
426 	u32 i;
427 	struct __vxge_hw_virtualpath *vpath;
428 
429 	vpath = channel->vph->vpath;
430 
431 	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
432 		for (i = 0; i < channel->length; i++)
433 			channel->orig_arr[i] = channel->reserve_arr[i];
434 	}
435 
436 	switch (channel->type) {
437 	case VXGE_HW_CHANNEL_TYPE_FIFO:
438 		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
439 		channel->stats = &((struct __vxge_hw_fifo *)
440 				channel)->stats->common_stats;
441 		break;
442 	case VXGE_HW_CHANNEL_TYPE_RING:
443 		vpath->ringh = (struct __vxge_hw_ring *)channel;
444 		channel->stats = &((struct __vxge_hw_ring *)
445 				channel)->stats->common_stats;
446 		break;
447 	default:
448 		break;
449 	}
450 
451 	return VXGE_HW_OK;
452 }
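/*
 * Note: the copy of reserve_arr into orig_arr above is what allows
 * __vxge_hw_channel_reset() below to restore the reserve array to its
 * original contents when the channel is reset.
 */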
453 
454 /*
455  * __vxge_hw_channel_reset - Resets a channel
456  * This function resets a channel by properly setting the various references
457  */
458 static enum vxge_hw_status
459 __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
460 {
461 	u32 i;
462 
463 	for (i = 0; i < channel->length; i++) {
464 		if (channel->reserve_arr != NULL)
465 			channel->reserve_arr[i] = channel->orig_arr[i];
466 		if (channel->free_arr != NULL)
467 			channel->free_arr[i] = NULL;
468 		if (channel->work_arr != NULL)
469 			channel->work_arr[i] = NULL;
470 	}
471 	channel->free_ptr = channel->length;
472 	channel->reserve_ptr = channel->length;
473 	channel->reserve_top = 0;
474 	channel->post_index = 0;
475 	channel->compl_index = 0;
476 
477 	return VXGE_HW_OK;
478 }
479 
480 /*
481  * __vxge_hw_device_pci_e_init
482  * Initialize certain PCI/PCI-X configuration registers
483  * with recommended values. Save config space for future hw resets.
484  */
485 static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
486 {
487 	u16 cmd = 0;
488 
489 	/* Set the PErr Response bit and SERR in the PCI command register. */
490 	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
491 	cmd |= 0x140;
492 	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
493 
494 	pci_save_state(hldev->pdev);
495 }
496 
497 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
498  * in progress
499  * This routine checks that the vpath reset-in-progress register has turned zero
500  */
501 static enum vxge_hw_status
502 __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
503 {
504 	enum vxge_hw_status status;
505 	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
506 			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
507 			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
508 	return status;
509 }
510 
511 /*
512  * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
513  * Set the swapper bits appropriately for the legacy section.
514  */
515 static enum vxge_hw_status
516 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
517 {
518 	u64 val64;
519 	enum vxge_hw_status status = VXGE_HW_OK;
520 
521 	val64 = readq(&legacy_reg->toc_swapper_fb);
522 
523 	wmb();
524 
525 	switch (val64) {
526 	case VXGE_HW_SWAPPER_INITIAL_VALUE:
527 		return status;
528 
529 	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
530 		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
531 			&legacy_reg->pifm_rd_swap_en);
532 		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
533 			&legacy_reg->pifm_rd_flip_en);
534 		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
535 			&legacy_reg->pifm_wr_swap_en);
536 		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
537 			&legacy_reg->pifm_wr_flip_en);
538 		break;
539 
540 	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
541 		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
542 			&legacy_reg->pifm_rd_swap_en);
543 		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
544 			&legacy_reg->pifm_wr_swap_en);
545 		break;
546 
547 	case VXGE_HW_SWAPPER_BIT_FLIPPED:
548 		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
549 			&legacy_reg->pifm_rd_flip_en);
550 		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
551 			&legacy_reg->pifm_wr_flip_en);
552 		break;
553 	}
554 
555 	wmb();
556 
557 	val64 = readq(&legacy_reg->toc_swapper_fb);
558 
559 	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
560 		status = VXGE_HW_ERR_SWAPPER_CTRL;
561 
562 	return status;
563 }
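/*
 * Note: the routine above probes toc_swapper_fb for a known byte-swapped
 * and/or bit-flipped pattern, programs the matching read/write swap and
 * flip enables, and then re-reads the register; anything other than the
 * initial value at that point is reported as VXGE_HW_ERR_SWAPPER_CTRL.
 */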
564 
565 /*
566  * __vxge_hw_device_toc_get
567  * This routine sets the swapper and reads the toc pointer and returns the
568  * memory mapped address of the toc
569  */
570 static struct vxge_hw_toc_reg __iomem *
571 __vxge_hw_device_toc_get(void __iomem *bar0)
572 {
573 	u64 val64;
574 	struct vxge_hw_toc_reg __iomem *toc = NULL;
575 	enum vxge_hw_status status;
576 
577 	struct vxge_hw_legacy_reg __iomem *legacy_reg =
578 		(struct vxge_hw_legacy_reg __iomem *)bar0;
579 
580 	status = __vxge_hw_legacy_swapper_set(legacy_reg);
581 	if (status != VXGE_HW_OK)
582 		goto exit;
583 
584 	val64 =	readq(&legacy_reg->toc_first_pointer);
585 	toc = bar0 + val64;
586 exit:
587 	return toc;
588 }
589 
590 /*
591  * __vxge_hw_device_reg_addr_get
592  * This routine sets the swapper and reads the toc pointer and initializes the
593  * register location pointers in the device object. It waits until the ric has
594  * completed initializing the registers.
595  */
596 static enum vxge_hw_status
597 __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
598 {
599 	u64 val64;
600 	u32 i;
601 	enum vxge_hw_status status = VXGE_HW_OK;
602 
603 	hldev->legacy_reg = hldev->bar0;
604 
605 	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
606 	if (hldev->toc_reg  == NULL) {
607 		status = VXGE_HW_FAIL;
608 		goto exit;
609 	}
610 
611 	val64 = readq(&hldev->toc_reg->toc_common_pointer);
612 	hldev->common_reg = hldev->bar0 + val64;
613 
614 	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
615 	hldev->mrpcim_reg = hldev->bar0 + val64;
616 
617 	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
618 		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
619 		hldev->srpcim_reg[i] = hldev->bar0 + val64;
620 	}
621 
622 	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
623 		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
624 		hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
625 	}
626 
627 	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
628 		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
629 		hldev->vpath_reg[i] = hldev->bar0 + val64;
630 	}
631 
632 	val64 = readq(&hldev->toc_reg->toc_kdfc);
633 
634 	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
635 	case 0:
636 		hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64) ;
637 		break;
638 	default:
639 		break;
640 	}
641 
642 	status = __vxge_hw_device_vpath_reset_in_prog_check(
643 			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
644 exit:
645 	return status;
646 }
647 
648 /*
649  * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
650  * This routine returns the Access Rights of the driver
651  */
652 static u32
653 __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
654 {
655 	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
656 
657 	switch (host_type) {
658 	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
659 		if (func_id == 0) {
660 			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
661 					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
662 		}
663 		break;
664 	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
665 		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
666 				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
667 		break;
668 	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
669 		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
670 				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
671 		break;
672 	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
673 	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
674 	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
675 		break;
676 	case VXGE_HW_SR_VH_FUNCTION0:
677 	case VXGE_HW_VH_NORMAL_FUNCTION:
678 		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
679 		break;
680 	}
681 
682 	return access_rights;
683 }
684 /*
685  * __vxge_hw_device_is_privilaged
686  * This routine checks if the device function is privileged or not
687  */
688 
689 enum vxge_hw_status
690 __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
691 {
692 	if (__vxge_hw_device_access_rights_get(host_type,
693 		func_id) &
694 		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
695 		return VXGE_HW_OK;
696 	else
697 		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
698 }
699 
700 /*
701  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
702  * Returns the function number of the vpath.
703  */
704 static u32
705 __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
706 {
707 	u64 val64;
708 
709 	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
710 
711 	return
712 	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
713 }
714 
715 /*
716  * __vxge_hw_device_host_info_get
717  * This routine returns the host type assignments
718  */
719 static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
720 {
721 	u64 val64;
722 	u32 i;
723 
724 	val64 = readq(&hldev->common_reg->host_type_assignments);
725 
726 	hldev->host_type =
727 	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
728 
729 	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
730 
731 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
732 		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
733 			continue;
734 
735 		hldev->func_id =
736 			__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
737 
738 		hldev->access_rights = __vxge_hw_device_access_rights_get(
739 			hldev->host_type, hldev->func_id);
740 
741 		hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
742 		hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
743 
744 		hldev->first_vp_id = i;
745 		break;
746 	}
747 }
748 
749 /*
750  * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
751  * link width and signalling rate.
752  */
753 static enum vxge_hw_status
754 __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
755 {
756 	struct pci_dev *dev = hldev->pdev;
757 	u16 lnk;
758 
759 	/* Get the negotiated link width and speed from PCI config space */
760 	pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
761 
762 	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
763 		return VXGE_HW_ERR_INVALID_PCI_INFO;
764 
765 	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
766 	case PCIE_LNK_WIDTH_RESRV:
767 	case PCIE_LNK_X1:
768 	case PCIE_LNK_X2:
769 	case PCIE_LNK_X4:
770 	case PCIE_LNK_X8:
771 		break;
772 	default:
773 		return VXGE_HW_ERR_INVALID_PCI_INFO;
774 	}
775 
776 	return VXGE_HW_OK;
777 }
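/*
 * Note: the check above requires a current link speed field of 1
 * (2.5 GT/s) and one of the expected negotiated link-width encodings;
 * anything else is reported as VXGE_HW_ERR_INVALID_PCI_INFO.
 */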
778 
779 /*
780  * __vxge_hw_device_initialize
781  * Initialize Titan-V hardware.
782  */
783 static enum vxge_hw_status
784 __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
785 {
786 	enum vxge_hw_status status = VXGE_HW_OK;
787 
788 	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
789 				hldev->func_id)) {
790 		/* Validate the pci-e link width and speed */
791 		status = __vxge_hw_verify_pci_e_info(hldev);
792 		if (status != VXGE_HW_OK)
793 			goto exit;
794 	}
795 
796 exit:
797 	return status;
798 }
799 
800 /*
801  * __vxge_hw_vpath_fw_ver_get - Get the fw version
802  * Returns FW Version
803  */
804 static enum vxge_hw_status
805 __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
806 			   struct vxge_hw_device_hw_info *hw_info)
807 {
808 	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
809 	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
810 	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
811 	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
812 	u64 data0, data1 = 0, steer_ctrl = 0;
813 	enum vxge_hw_status status;
814 
815 	status = vxge_hw_vpath_fw_api(vpath,
816 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
817 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
818 			0, &data0, &data1, &steer_ctrl);
819 	if (status != VXGE_HW_OK)
820 		goto exit;
821 
822 	fw_date->day =
823 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
824 	fw_date->month =
825 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
826 	fw_date->year =
827 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
828 
829 	snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
830 		 fw_date->month, fw_date->day, fw_date->year);
831 
832 	fw_version->major =
833 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
834 	fw_version->minor =
835 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
836 	fw_version->build =
837 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
838 
839 	snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
840 		 fw_version->major, fw_version->minor, fw_version->build);
841 
842 	flash_date->day =
843 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
844 	flash_date->month =
845 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
846 	flash_date->year =
847 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
848 
849 	snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
850 		 flash_date->month, flash_date->day, flash_date->year);
851 
852 	flash_version->major =
853 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
854 	flash_version->minor =
855 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
856 	flash_version->build =
857 	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
858 
859 	snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
860 		 flash_version->major, flash_version->minor,
861 		 flash_version->build);
862 
863 exit:
864 	return status;
865 }
866 
867 /*
868  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
869  * part number and product description.
870  */
871 static enum vxge_hw_status
872 __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
873 			      struct vxge_hw_device_hw_info *hw_info)
874 {
875 	enum vxge_hw_status status;
876 	u64 data0, data1 = 0, steer_ctrl = 0;
877 	u8 *serial_number = hw_info->serial_number;
878 	u8 *part_number = hw_info->part_number;
879 	u8 *product_desc = hw_info->product_desc;
880 	u32 i, j = 0;
881 
882 	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
883 
884 	status = vxge_hw_vpath_fw_api(vpath,
885 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
886 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
887 			0, &data0, &data1, &steer_ctrl);
888 	if (status != VXGE_HW_OK)
889 		return status;
890 
891 	((u64 *)serial_number)[0] = be64_to_cpu(data0);
892 	((u64 *)serial_number)[1] = be64_to_cpu(data1);
893 
894 	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
895 	data1 = steer_ctrl = 0;
896 
897 	status = vxge_hw_vpath_fw_api(vpath,
898 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
899 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
900 			0, &data0, &data1, &steer_ctrl);
901 	if (status != VXGE_HW_OK)
902 		return status;
903 
904 	((u64 *)part_number)[0] = be64_to_cpu(data0);
905 	((u64 *)part_number)[1] = be64_to_cpu(data1);
906 
907 	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
908 	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
909 		data0 = i;
910 		data1 = steer_ctrl = 0;
911 
912 		status = vxge_hw_vpath_fw_api(vpath,
913 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
914 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
915 			0, &data0, &data1, &steer_ctrl);
916 		if (status != VXGE_HW_OK)
917 			return status;
918 
919 		((u64 *)product_desc)[j++] = be64_to_cpu(data0);
920 		((u64 *)product_desc)[j++] = be64_to_cpu(data1);
921 	}
922 
923 	return status;
924 }
925 
926 /*
927  * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
928  * Returns pci function mode
929  */
930 static enum vxge_hw_status
931 __vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
932 				  struct vxge_hw_device_hw_info *hw_info)
933 {
934 	u64 data0, data1 = 0, steer_ctrl = 0;
935 	enum vxge_hw_status status;
936 
937 	data0 = 0;
938 
939 	status = vxge_hw_vpath_fw_api(vpath,
940 			VXGE_HW_FW_API_GET_FUNC_MODE,
941 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
942 			0, &data0, &data1, &steer_ctrl);
943 	if (status != VXGE_HW_OK)
944 		return status;
945 
946 	hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
947 	return status;
948 }
949 
950 /*
951  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
952  *               from MAC address table.
953  */
954 static enum vxge_hw_status
955 __vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
956 			 u8 *macaddr, u8 *macaddr_mask)
957 {
958 	u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
959 	    data0 = 0, data1 = 0, steer_ctrl = 0;
960 	enum vxge_hw_status status;
961 	int i;
962 
963 	do {
964 		status = vxge_hw_vpath_fw_api(vpath, action,
965 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
966 			0, &data0, &data1, &steer_ctrl);
967 		if (status != VXGE_HW_OK)
968 			goto exit;
969 
970 		data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
971 		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
972 									data1);
973 
974 		for (i = ETH_ALEN; i > 0; i--) {
975 			macaddr[i - 1] = (u8) (data0 & 0xFF);
976 			data0 >>= 8;
977 
978 			macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
979 			data1 >>= 8;
980 		}
981 
982 		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
983 		data0 = 0, data1 = 0, steer_ctrl = 0;
984 
985 	} while (!is_valid_ether_addr(macaddr));
986 exit:
987 	return status;
988 }
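/*
 * Note: the loop above walks the DA (MAC address) table using the
 * LIST_FIRST_ENTRY/LIST_NEXT_ENTRY actions and keeps reading entries until
 * it finds one that is a valid unicast Ethernet address.
 */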
989 
990 /**
991  * vxge_hw_device_hw_info_get - Get the hw information
992  * Returns the vpath mask that has the bits set for each vpath allocated
993  * for the driver, FW version information, and the first mac address for
994  * each vpath
995  */
996 enum vxge_hw_status __devinit
997 vxge_hw_device_hw_info_get(void __iomem *bar0,
998 			   struct vxge_hw_device_hw_info *hw_info)
999 {
1000 	u32 i;
1001 	u64 val64;
1002 	struct vxge_hw_toc_reg __iomem *toc;
1003 	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
1004 	struct vxge_hw_common_reg __iomem *common_reg;
1005 	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
1006 	enum vxge_hw_status status;
1007 	struct __vxge_hw_virtualpath vpath;
1008 
1009 	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
1010 
1011 	toc = __vxge_hw_device_toc_get(bar0);
1012 	if (toc == NULL) {
1013 		status = VXGE_HW_ERR_CRITICAL;
1014 		goto exit;
1015 	}
1016 
1017 	val64 = readq(&toc->toc_common_pointer);
1018 	common_reg = bar0 + val64;
1019 
1020 	status = __vxge_hw_device_vpath_reset_in_prog_check(
1021 		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
1022 	if (status != VXGE_HW_OK)
1023 		goto exit;
1024 
1025 	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
1026 
1027 	val64 = readq(&common_reg->host_type_assignments);
1028 
1029 	hw_info->host_type =
1030 	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
1031 
1032 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1033 		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1034 			continue;
1035 
1036 		val64 = readq(&toc->toc_vpmgmt_pointer[i]);
1037 
1038 		vpmgmt_reg = bar0 + val64;
1039 
1040 		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
1041 		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
1042 			hw_info->func_id) &
1043 			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
1044 
1045 			val64 = readq(&toc->toc_mrpcim_pointer);
1046 
1047 			mrpcim_reg = bar0 + val64;
1048 
1049 			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
1050 			wmb();
1051 		}
1052 
1053 		val64 = readq(&toc->toc_vpath_pointer[i]);
1054 
1055 		spin_lock_init(&vpath.lock);
1056 		vpath.vp_reg = bar0 + val64;
1057 		vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
1058 
1059 		status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
1060 		if (status != VXGE_HW_OK)
1061 			goto exit;
1062 
1063 		status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
1064 		if (status != VXGE_HW_OK)
1065 			goto exit;
1066 
1067 		status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
1068 		if (status != VXGE_HW_OK)
1069 			goto exit;
1070 
1071 		break;
1072 	}
1073 
1074 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1075 		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1076 			continue;
1077 
1078 		val64 = readq(&toc->toc_vpath_pointer[i]);
1079 		vpath.vp_reg = bar0 + val64;
1080 		vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
1081 
1082 		status =  __vxge_hw_vpath_addr_get(&vpath,
1083 				hw_info->mac_addrs[i],
1084 				hw_info->mac_addr_masks[i]);
1085 		if (status != VXGE_HW_OK)
1086 			goto exit;
1087 	}
1088 exit:
1089 	return status;
1090 }
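/*
 * Note: the first loop above only inspects the first vpath assigned to this
 * function (it breaks after gathering the function mode, firmware version
 * and card info); the second loop then collects the MAC address and mask of
 * every assigned vpath.
 */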
1091 
1092 /*
1093  * __vxge_hw_blockpool_destroy - Deallocates the block pool
1094  */
1095 static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
1096 {
1097 	struct __vxge_hw_device *hldev;
1098 	struct list_head *p, *n;
1099 	u16 ret;
1100 
1101 	if (blockpool == NULL) {
1102 		ret = 1;
1103 		goto exit;
1104 	}
1105 
1106 	hldev = blockpool->hldev;
1107 
1108 	list_for_each_safe(p, n, &blockpool->free_block_list) {
1109 		pci_unmap_single(hldev->pdev,
1110 			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
1111 			((struct __vxge_hw_blockpool_entry *)p)->length,
1112 			PCI_DMA_BIDIRECTIONAL);
1113 
1114 		vxge_os_dma_free(hldev->pdev,
1115 			((struct __vxge_hw_blockpool_entry *)p)->memblock,
1116 			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
1117 
1118 		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1119 		kfree(p);
1120 		blockpool->pool_size--;
1121 	}
1122 
1123 	list_for_each_safe(p, n, &blockpool->free_entry_list) {
1124 		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1125 		kfree((void *)p);
1126 	}
1127 	ret = 0;
1128 exit:
1129 	return;
1130 }
1131 
1132 /*
1133  * __vxge_hw_blockpool_create - Create block pool
1134  */
1135 static enum vxge_hw_status
1136 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1137 			   struct __vxge_hw_blockpool *blockpool,
1138 			   u32 pool_size,
1139 			   u32 pool_max)
1140 {
1141 	u32 i;
1142 	struct __vxge_hw_blockpool_entry *entry = NULL;
1143 	void *memblock;
1144 	dma_addr_t dma_addr;
1145 	struct pci_dev *dma_handle;
1146 	struct pci_dev *acc_handle;
1147 	enum vxge_hw_status status = VXGE_HW_OK;
1148 
1149 	if (blockpool == NULL) {
1150 		status = VXGE_HW_FAIL;
1151 		goto blockpool_create_exit;
1152 	}
1153 
1154 	blockpool->hldev = hldev;
1155 	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
1156 	blockpool->pool_size = 0;
1157 	blockpool->pool_max = pool_max;
1158 	blockpool->req_out = 0;
1159 
1160 	INIT_LIST_HEAD(&blockpool->free_block_list);
1161 	INIT_LIST_HEAD(&blockpool->free_entry_list);
1162 
1163 	for (i = 0; i < pool_size + pool_max; i++) {
1164 		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1165 				GFP_KERNEL);
1166 		if (entry == NULL) {
1167 			__vxge_hw_blockpool_destroy(blockpool);
1168 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
1169 			goto blockpool_create_exit;
1170 		}
1171 		list_add(&entry->item, &blockpool->free_entry_list);
1172 	}
1173 
1174 	for (i = 0; i < pool_size; i++) {
1175 		memblock = vxge_os_dma_malloc(
1176 				hldev->pdev,
1177 				VXGE_HW_BLOCK_SIZE,
1178 				&dma_handle,
1179 				&acc_handle);
1180 		if (memblock == NULL) {
1181 			__vxge_hw_blockpool_destroy(blockpool);
1182 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
1183 			goto blockpool_create_exit;
1184 		}
1185 
1186 		dma_addr = pci_map_single(hldev->pdev, memblock,
1187 				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
1188 		if (unlikely(pci_dma_mapping_error(hldev->pdev,
1189 				dma_addr))) {
1190 			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
1191 			__vxge_hw_blockpool_destroy(blockpool);
1192 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
1193 			goto blockpool_create_exit;
1194 		}
1195 
1196 		if (!list_empty(&blockpool->free_entry_list))
1197 			entry = (struct __vxge_hw_blockpool_entry *)
1198 				list_first_entry(&blockpool->free_entry_list,
1199 					struct __vxge_hw_blockpool_entry,
1200 					item);
1201 
1202 		if (entry == NULL)
1203 			entry =
1204 			    kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1205 					GFP_KERNEL);
1206 		if (entry != NULL) {
1207 			list_del(&entry->item);
1208 			entry->length = VXGE_HW_BLOCK_SIZE;
1209 			entry->memblock = memblock;
1210 			entry->dma_addr = dma_addr;
1211 			entry->acc_handle = acc_handle;
1212 			entry->dma_handle = dma_handle;
1213 			list_add(&entry->item,
1214 					  &blockpool->free_block_list);
1215 			blockpool->pool_size++;
1216 		} else {
1217 			__vxge_hw_blockpool_destroy(blockpool);
1218 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
1219 			goto blockpool_create_exit;
1220 		}
1221 	}
1222 
1223 blockpool_create_exit:
1224 	return status;
1225 }
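/*
 * Note: any allocation or DMA-mapping failure above tears the whole pool
 * back down via __vxge_hw_blockpool_destroy() and returns
 * VXGE_HW_ERR_OUT_OF_MEMORY, so a partially built pool is never left behind.
 */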
1226 
1227 /*
1228  * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1229  * Check the fifo configuration
1230  */
1231 static enum vxge_hw_status
1232 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1233 {
1234 	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1235 	    (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1236 		return VXGE_HW_BADCFG_FIFO_BLOCKS;
1237 
1238 	return VXGE_HW_OK;
1239 }
1240 
1241 /*
1242  * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1243  * Check the vpath configuration
1244  */
1245 static enum vxge_hw_status
1246 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1247 {
1248 	enum vxge_hw_status status;
1249 
1250 	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1251 	    (vp_config->min_bandwidth >	VXGE_HW_VPATH_BANDWIDTH_MAX))
1252 		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1253 
1254 	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1255 	if (status != VXGE_HW_OK)
1256 		return status;
1257 
1258 	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1259 		((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1260 		(vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1261 		return VXGE_HW_BADCFG_VPATH_MTU;
1262 
1263 	if ((vp_config->rpa_strip_vlan_tag !=
1264 		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1265 		(vp_config->rpa_strip_vlan_tag !=
1266 		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1267 		(vp_config->rpa_strip_vlan_tag !=
1268 		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1269 		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1270 
1271 	return VXGE_HW_OK;
1272 }
1273 
1274 /*
1275  * __vxge_hw_device_config_check - Check device configuration.
1276  * Check the device configuration
1277  */
1278 static enum vxge_hw_status
1279 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1280 {
1281 	u32 i;
1282 	enum vxge_hw_status status;
1283 
1284 	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1285 	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1286 	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1287 	    (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1288 		return VXGE_HW_BADCFG_INTR_MODE;
1289 
1290 	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1291 	    (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1292 		return VXGE_HW_BADCFG_RTS_MAC_EN;
1293 
1294 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1295 		status = __vxge_hw_device_vpath_config_check(
1296 				&new_config->vp_config[i]);
1297 		if (status != VXGE_HW_OK)
1298 			return status;
1299 	}
1300 
1301 	return VXGE_HW_OK;
1302 }
1303 
1304 /*
1305  * vxge_hw_device_initialize - Initialize Titan device.
1306  * Initialize Titan device. Note that all the arguments of this public API
1307  * are 'IN', including @hldev. The driver cooperates with the
1308  * OS to find a new Titan device and locate its PCI and memory spaces.
1309  *
1310  * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
1311  * to enable the latter to perform Titan hardware initialization.
1312  */
1313 enum vxge_hw_status __devinit
1314 vxge_hw_device_initialize(
1315 	struct __vxge_hw_device **devh,
1316 	struct vxge_hw_device_attr *attr,
1317 	struct vxge_hw_device_config *device_config)
1318 {
1319 	u32 i;
1320 	u32 nblocks = 0;
1321 	struct __vxge_hw_device *hldev = NULL;
1322 	enum vxge_hw_status status = VXGE_HW_OK;
1323 
1324 	status = __vxge_hw_device_config_check(device_config);
1325 	if (status != VXGE_HW_OK)
1326 		goto exit;
1327 
1328 	hldev = vzalloc(sizeof(struct __vxge_hw_device));
1329 	if (hldev == NULL) {
1330 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
1331 		goto exit;
1332 	}
1333 
1334 	hldev->magic = VXGE_HW_DEVICE_MAGIC;
1335 
1336 	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
1337 
1338 	/* apply config */
1339 	memcpy(&hldev->config, device_config,
1340 		sizeof(struct vxge_hw_device_config));
1341 
1342 	hldev->bar0 = attr->bar0;
1343 	hldev->pdev = attr->pdev;
1344 
1345 	hldev->uld_callbacks = attr->uld_callbacks;
1346 
1347 	__vxge_hw_device_pci_e_init(hldev);
1348 
1349 	status = __vxge_hw_device_reg_addr_get(hldev);
1350 	if (status != VXGE_HW_OK) {
1351 		vfree(hldev);
1352 		goto exit;
1353 	}
1354 
1355 	__vxge_hw_device_host_info_get(hldev);
1356 
1357 	/* Incrementing for stats blocks */
1358 	nblocks++;
1359 
1360 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1361 		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
1362 			continue;
1363 
1364 		if (device_config->vp_config[i].ring.enable ==
1365 			VXGE_HW_RING_ENABLE)
1366 			nblocks += device_config->vp_config[i].ring.ring_blocks;
1367 
1368 		if (device_config->vp_config[i].fifo.enable ==
1369 			VXGE_HW_FIFO_ENABLE)
1370 			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
1371 		nblocks++;
1372 	}
1373 
1374 	if (__vxge_hw_blockpool_create(hldev,
1375 		&hldev->block_pool,
1376 		device_config->dma_blockpool_initial + nblocks,
1377 		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
1378 
1379 		vxge_hw_device_terminate(hldev);
1380 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
1381 		goto exit;
1382 	}
1383 
1384 	status = __vxge_hw_device_initialize(hldev);
1385 	if (status != VXGE_HW_OK) {
1386 		vxge_hw_device_terminate(hldev);
1387 		goto exit;
1388 	}
1389 
1390 	*devh = hldev;
1391 exit:
1392 	return status;
1393 }
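/*
 * Note: 'nblocks' above counts one block for the stats area plus, for every
 * assigned vpath, its configured ring and fifo blocks and one extra block;
 * the DMA block pool is then sized as the configured initial and maximum
 * values plus that count.
 */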
1394 
1395 /*
1396  * vxge_hw_device_terminate - Terminate Titan device.
1397  * Terminate HW device.
1398  */
1399 void
1400 vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
1401 {
1402 	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
1403 
1404 	hldev->magic = VXGE_HW_DEVICE_DEAD;
1405 	__vxge_hw_blockpool_destroy(&hldev->block_pool);
1406 	vfree(hldev);
1407 }
1408 
1409 /*
1410  * __vxge_hw_vpath_stats_access - Get the statistics from the given location
1411  *                           and offset and perform an operation
1412  */
1413 static enum vxge_hw_status
1414 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
1415 			     u32 operation, u32 offset, u64 *stat)
1416 {
1417 	u64 val64;
1418 	enum vxge_hw_status status = VXGE_HW_OK;
1419 	struct vxge_hw_vpath_reg __iomem *vp_reg;
1420 
1421 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1422 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1423 		goto vpath_stats_access_exit;
1424 	}
1425 
1426 	vp_reg = vpath->vp_reg;
1427 
1428 	val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
1429 		 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
1430 		 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
1431 
1432 	status = __vxge_hw_pio_mem_write64(val64,
1433 				&vp_reg->xmac_stats_access_cmd,
1434 				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
1435 				vpath->hldev->config.device_poll_millis);
1436 	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1437 		*stat = readq(&vp_reg->xmac_stats_access_data);
1438 	else
1439 		*stat = 0;
1440 
1441 vpath_stats_access_exit:
1442 	return status;
1443 }
1444 
1445 /*
1446  * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
1447  */
1448 static enum vxge_hw_status
1449 __vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
1450 			struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
1451 {
1452 	u64 *val64;
1453 	int i;
1454 	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
1455 	enum vxge_hw_status status = VXGE_HW_OK;
1456 
1457 	val64 = (u64 *)vpath_tx_stats;
1458 
1459 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1460 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1461 		goto exit;
1462 	}
1463 
1464 	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
1465 		status = __vxge_hw_vpath_stats_access(vpath,
1466 					VXGE_HW_STATS_OP_READ,
1467 					offset, val64);
1468 		if (status != VXGE_HW_OK)
1469 			goto exit;
1470 		offset++;
1471 		val64++;
1472 	}
1473 exit:
1474 	return status;
1475 }
1476 
1477 /*
1478  * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
1479  */
1480 static enum vxge_hw_status
1481 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
1482 			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
1483 {
1484 	u64 *val64;
1485 	enum vxge_hw_status status = VXGE_HW_OK;
1486 	int i;
1487 	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
1488 	val64 = (u64 *) vpath_rx_stats;
1489 
1490 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1491 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1492 		goto exit;
1493 	}
1494 	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
1495 		status = __vxge_hw_vpath_stats_access(vpath,
1496 					VXGE_HW_STATS_OP_READ,
1497 					offset >> 3, val64);
1498 		if (status != VXGE_HW_OK)
1499 			goto exit;
1500 
1501 		offset += 8;
1502 		val64++;
1503 	}
1504 exit:
1505 	return status;
1506 }
1507 
1508 /*
1509  * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
1510  */
1511 static enum vxge_hw_status
1512 __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
1513 			  struct vxge_hw_vpath_stats_hw_info *hw_stats)
1514 {
1515 	u64 val64;
1516 	enum vxge_hw_status status = VXGE_HW_OK;
1517 	struct vxge_hw_vpath_reg __iomem *vp_reg;
1518 
1519 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1520 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1521 		goto exit;
1522 	}
1523 	vp_reg = vpath->vp_reg;
1524 
1525 	val64 = readq(&vp_reg->vpath_debug_stats0);
1526 	hw_stats->ini_num_mwr_sent =
1527 		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
1528 
1529 	val64 = readq(&vp_reg->vpath_debug_stats1);
1530 	hw_stats->ini_num_mrd_sent =
1531 		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
1532 
1533 	val64 = readq(&vp_reg->vpath_debug_stats2);
1534 	hw_stats->ini_num_cpl_rcvd =
1535 		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
1536 
1537 	val64 = readq(&vp_reg->vpath_debug_stats3);
1538 	hw_stats->ini_num_mwr_byte_sent =
1539 		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
1540 
1541 	val64 = readq(&vp_reg->vpath_debug_stats4);
1542 	hw_stats->ini_num_cpl_byte_rcvd =
1543 		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
1544 
1545 	val64 = readq(&vp_reg->vpath_debug_stats5);
1546 	hw_stats->wrcrdtarb_xoff =
1547 		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
1548 
1549 	val64 = readq(&vp_reg->vpath_debug_stats6);
1550 	hw_stats->rdcrdtarb_xoff =
1551 		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
1552 
1553 	val64 = readq(&vp_reg->vpath_genstats_count01);
1554 	hw_stats->vpath_genstats_count0 =
1555 	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
1556 		val64);
1557 
1558 	val64 = readq(&vp_reg->vpath_genstats_count01);
1559 	hw_stats->vpath_genstats_count1 =
1560 	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
1561 		val64);
1562 
1563 	val64 = readq(&vp_reg->vpath_genstats_count23);
1564 	hw_stats->vpath_genstats_count2 =
1565 	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
1566 		val64);
1567 
1568 	val64 = readq(&vp_reg->vpath_genstats_count01);
1569 	hw_stats->vpath_genstats_count3 =
1570 	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
1571 		val64);
1572 
1573 	val64 = readq(&vp_reg->vpath_genstats_count4);
1574 	hw_stats->vpath_genstats_count4 =
1575 	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
1576 		val64);
1577 
1578 	val64 = readq(&vp_reg->vpath_genstats_count5);
1579 	hw_stats->vpath_genstats_count5 =
1580 	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
1581 		val64);
1582 
1583 	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
1584 	if (status != VXGE_HW_OK)
1585 		goto exit;
1586 
1587 	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
1588 	if (status != VXGE_HW_OK)
1589 		goto exit;
1590 
1591 	VXGE_HW_VPATH_STATS_PIO_READ(
1592 		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
1593 
1594 	hw_stats->prog_event_vnum0 =
1595 			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
1596 
1597 	hw_stats->prog_event_vnum1 =
1598 			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
1599 
1600 	VXGE_HW_VPATH_STATS_PIO_READ(
1601 		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
1602 
1603 	hw_stats->prog_event_vnum2 =
1604 			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
1605 
1606 	hw_stats->prog_event_vnum3 =
1607 			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
1608 
1609 	val64 = readq(&vp_reg->rx_multi_cast_stats);
1610 	hw_stats->rx_multi_cast_frame_discard =
1611 		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
1612 
1613 	val64 = readq(&vp_reg->rx_frm_transferred);
1614 	hw_stats->rx_frm_transferred =
1615 		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
1616 
1617 	val64 = readq(&vp_reg->rxd_returned);
1618 	hw_stats->rxd_returned =
1619 		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
1620 
1621 	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
1622 	hw_stats->rx_mpa_len_fail_frms =
1623 		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
1624 	hw_stats->rx_mpa_mrk_fail_frms =
1625 		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
1626 	hw_stats->rx_mpa_crc_fail_frms =
1627 		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
1628 
1629 	val64 = readq(&vp_reg->dbg_stats_rx_fau);
1630 	hw_stats->rx_permitted_frms =
1631 		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
1632 	hw_stats->rx_vp_reset_discarded_frms =
1633 	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
1634 	hw_stats->rx_wol_frms =
1635 		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
1636 
1637 	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
1638 	hw_stats->tx_vp_reset_discarded_frms =
1639 	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
1640 		val64);
1641 exit:
1642 	return status;
1643 }
1644 
1645 /*
1646  * vxge_hw_device_stats_get - Get the device hw statistics.
1647  * Returns the vpath h/w stats for the device.
1648  */
1649 enum vxge_hw_status
1650 vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
1651 			struct vxge_hw_device_stats_hw_info *hw_stats)
1652 {
1653 	u32 i;
1654 	enum vxge_hw_status status = VXGE_HW_OK;
1655 
1656 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1657 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
1658 			(hldev->virtual_paths[i].vp_open ==
1659 				VXGE_HW_VP_NOT_OPEN))
1660 			continue;
1661 
1662 		memcpy(hldev->virtual_paths[i].hw_stats_sav,
1663 				hldev->virtual_paths[i].hw_stats,
1664 				sizeof(struct vxge_hw_vpath_stats_hw_info));
1665 
1666 		status = __vxge_hw_vpath_stats_get(
1667 			&hldev->virtual_paths[i],
1668 			hldev->virtual_paths[i].hw_stats);
1669 	}
1670 
1671 	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
1672 			sizeof(struct vxge_hw_device_stats_hw_info));
1673 
1674 	return status;
1675 }
1676 
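/* Usage sketch (illustrative, not from the original source): a caller that
 * has opened the device can poll the aggregated hardware statistics like so:
 *
 *	struct vxge_hw_device_stats_hw_info hw_info;
 *
 *	if (vxge_hw_device_stats_get(hldev, &hw_info) == VXGE_HW_OK)
 *		pr_info("vpath 0 rx frames: %llu\n", (unsigned long long)
 *			hw_info.vpath_info[0].rx_stats.rx_vld_frms);
 *
 * The vpath_info/rx_stats field names are assumptions based on this file's
 * per-vpath stats handling; see the structure definitions for the exact
 * layout.
 */
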
1677 /*
1678  * vxge_hw_driver_stats_get - Get the device sw statistics.
1679  * Returns the vpath s/w stats for the device.
1680  */
1681 enum vxge_hw_status vxge_hw_driver_stats_get(
1682 			struct __vxge_hw_device *hldev,
1683 			struct vxge_hw_device_stats_sw_info *sw_stats)
1684 {
1685 	enum vxge_hw_status status = VXGE_HW_OK;
1686 
1687 	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1688 		sizeof(struct vxge_hw_device_stats_sw_info));
1689 
1690 	return status;
1691 }
1692 
1693 /*
1694  * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
1695  *                           and offset and perform an operation
1696  * Get the statistics from the given location and offset.
1697  */
1698 enum vxge_hw_status
1699 vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
1700 			    u32 operation, u32 location, u32 offset, u64 *stat)
1701 {
1702 	u64 val64;
1703 	enum vxge_hw_status status = VXGE_HW_OK;
1704 
1705 	status = __vxge_hw_device_is_privilaged(hldev->host_type,
1706 			hldev->func_id);
1707 	if (status != VXGE_HW_OK)
1708 		goto exit;
1709 
1710 	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
1711 		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
1712 		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
1713 		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
1714 
1715 	status = __vxge_hw_pio_mem_write64(val64,
1716 				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
1717 				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
1718 				hldev->config.device_poll_millis);
1719 
1720 	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1721 		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
1722 	else
1723 		*stat = 0;
1724 exit:
1725 	return status;
1726 }
1727 
1728 /*
1729  * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
1730  * Get the Statistics on aggregate port
1731  */
1732 static enum vxge_hw_status
1733 vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
1734 				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
1735 {
1736 	u64 *val64;
1737 	int i;
1738 	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
1739 	enum vxge_hw_status status = VXGE_HW_OK;
1740 
1741 	val64 = (u64 *)aggr_stats;
1742 
1743 	status = __vxge_hw_device_is_privilaged(hldev->host_type,
1744 			hldev->func_id);
1745 	if (status != VXGE_HW_OK)
1746 		goto exit;
1747 
1748 	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
1749 		status = vxge_hw_mrpcim_stats_access(hldev,
1750 					VXGE_HW_STATS_OP_READ,
1751 					VXGE_HW_STATS_LOC_AGGR,
1752 					((offset + (104 * port)) >> 3), val64);
1753 		if (status != VXGE_HW_OK)
1754 			goto exit;
1755 
1756 		offset += 8;
1757 		val64++;
1758 	}
1759 exit:
1760 	return status;
1761 }
1762 
1763 /*
1764  * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
1765  * Get the Statistics on port
1766  */
1767 static enum vxge_hw_status
1768 vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
1769 				   struct vxge_hw_xmac_port_stats *port_stats)
1770 {
1771 	u64 *val64;
1772 	enum vxge_hw_status status = VXGE_HW_OK;
1773 	int i;
1774 	u32 offset = 0x0;
1775 	val64 = (u64 *) port_stats;
1776 
1777 	status = __vxge_hw_device_is_privilaged(hldev->host_type,
1778 			hldev->func_id);
1779 	if (status != VXGE_HW_OK)
1780 		goto exit;
1781 
1782 	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
1783 		status = vxge_hw_mrpcim_stats_access(hldev,
1784 					VXGE_HW_STATS_OP_READ,
1785 					VXGE_HW_STATS_LOC_AGGR,
1786 					((offset + (608 * port)) >> 3), val64);
1787 		if (status != VXGE_HW_OK)
1788 			goto exit;
1789 
1790 		offset += 8;
1791 		val64++;
1792 	}
1793 
1794 exit:
1795 	return status;
1796 }
1797 
1798 /*
1799  * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
1800  * Get the XMAC Statistics
1801  */
1802 enum vxge_hw_status
1803 vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1804 			      struct vxge_hw_xmac_stats *xmac_stats)
1805 {
1806 	enum vxge_hw_status status = VXGE_HW_OK;
1807 	u32 i;
1808 
1809 	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1810 					0, &xmac_stats->aggr_stats[0]);
1811 	if (status != VXGE_HW_OK)
1812 		goto exit;
1813 
1814 	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1815 				1, &xmac_stats->aggr_stats[1]);
1816 	if (status != VXGE_HW_OK)
1817 		goto exit;
1818 
1819 	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
1820 
1821 		status = vxge_hw_device_xmac_port_stats_get(hldev,
1822 					i, &xmac_stats->port_stats[i]);
1823 		if (status != VXGE_HW_OK)
1824 			goto exit;
1825 	}
1826 
1827 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1828 
1829 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
1830 			continue;
1831 
1832 		status = __vxge_hw_vpath_xmac_tx_stats_get(
1833 					&hldev->virtual_paths[i],
1834 					&xmac_stats->vpath_tx_stats[i]);
1835 		if (status != VXGE_HW_OK)
1836 			goto exit;
1837 
1838 		status = __vxge_hw_vpath_xmac_rx_stats_get(
1839 					&hldev->virtual_paths[i],
1840 					&xmac_stats->vpath_rx_stats[i]);
1841 		if (status != VXGE_HW_OK)
1842 			goto exit;
1843 	}
1844 exit:
1845 	return status;
1846 }
1847 
1848 /*
1849  * vxge_hw_device_debug_set - Set the debug module, level and timestamp
1850  * This routine is used to dynamically change the debug output
1851  */
1852 void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
1853 			      enum vxge_debug_level level, u32 mask)
1854 {
1855 	if (hldev == NULL)
1856 		return;
1857 
1858 #if defined(VXGE_DEBUG_TRACE_MASK) || \
1859 	defined(VXGE_DEBUG_ERR_MASK)
1860 	hldev->debug_module_mask = mask;
1861 	hldev->debug_level = level;
1862 #endif
1863 
1864 #if defined(VXGE_DEBUG_ERR_MASK)
1865 	hldev->level_err = level & VXGE_ERR;
1866 #endif
1867 
1868 #if defined(VXGE_DEBUG_TRACE_MASK)
1869 	hldev->level_trace = level & VXGE_TRACE;
1870 #endif
1871 }
1872 
1873 /*
1874  * vxge_hw_device_error_level_get - Get the error level
1875  * This routine returns the current error level set
1876  */
1877 u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
1878 {
1879 #if defined(VXGE_DEBUG_ERR_MASK)
1880 	if (hldev == NULL)
1881 		return VXGE_ERR;
1882 	else
1883 		return hldev->level_err;
1884 #else
1885 	return 0;
1886 #endif
1887 }
1888 
1889 /*
1890  * vxge_hw_device_trace_level_get - Get the trace level
1891  * This routine returns the current trace level set
1892  */
1893 u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
1894 {
1895 #if defined(VXGE_DEBUG_TRACE_MASK)
1896 	if (hldev == NULL)
1897 		return VXGE_TRACE;
1898 	else
1899 		return hldev->level_trace;
1900 #else
1901 	return 0;
1902 #endif
1903 }
1904 
1905 /*
1906  * vxge_hw_device_getpause_data - Pause frame generation and reception.
1907  * Returns the Pause frame generation and reception capability of the NIC.
1908  */
1909 enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
1910 						 u32 port, u32 *tx, u32 *rx)
1911 {
1912 	u64 val64;
1913 	enum vxge_hw_status status = VXGE_HW_OK;
1914 
1915 	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1916 		status = VXGE_HW_ERR_INVALID_DEVICE;
1917 		goto exit;
1918 	}
1919 
1920 	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1921 		status = VXGE_HW_ERR_INVALID_PORT;
1922 		goto exit;
1923 	}
1924 
1925 	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
1926 		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
1927 		goto exit;
1928 	}
1929 
1930 	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1931 	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
1932 		*tx = 1;
1933 	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
1934 		*rx = 1;
1935 exit:
1936 	return status;
1937 }
1938 
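/* Usage sketch (illustrative, not from the original source): the function
 * only sets *tx / *rx when the corresponding enable bit is present, so
 * callers should pre-initialize both flags:
 *
 *	u32 tx = 0, rx = 0;
 *
 *	if (vxge_hw_device_getpause_data(hldev, 0, &tx, &rx) == VXGE_HW_OK)
 *		pr_info("pause: tx %s, rx %s\n",
 *			tx ? "on" : "off", rx ? "on" : "off");
 */
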
1939 /*
1940  * vxge_hw_device_setpause_data -  set/reset pause frame generation.
1941  * It can be used to set or reset Pause frame generation or reception
1942  * support of the NIC.
1943  */
1944 enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1945 						 u32 port, u32 tx, u32 rx)
1946 {
1947 	u64 val64;
1948 	enum vxge_hw_status status = VXGE_HW_OK;
1949 
1950 	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1951 		status = VXGE_HW_ERR_INVALID_DEVICE;
1952 		goto exit;
1953 	}
1954 
1955 	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1956 		status = VXGE_HW_ERR_INVALID_PORT;
1957 		goto exit;
1958 	}
1959 
1960 	status = __vxge_hw_device_is_privilaged(hldev->host_type,
1961 			hldev->func_id);
1962 	if (status != VXGE_HW_OK)
1963 		goto exit;
1964 
1965 	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1966 	if (tx)
1967 		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1968 	else
1969 		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1970 	if (rx)
1971 		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1972 	else
1973 		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1974 
1975 	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1976 exit:
1977 	return status;
1978 }
1979 
1980 u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1981 {
1982 	struct pci_dev *dev = hldev->pdev;
1983 	u16 lnk;
1984 
1985 	pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
1986 	return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
1987 }
1988 
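/* Decoding note (illustrative, not from the original source): PCI_EXP_LNKSTA
 * carries the current link speed in bits 3:0 and the negotiated link width in
 * bits 9:4, which is why the value is masked and shifted right by 4 above.
 * For example, a link status value of 0x0081 decodes to a x8 link at 2.5 GT/s.
 */
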
1989 /*
1990  * __vxge_hw_ring_block_memblock_idx - Return the memblock index
1991  * This function returns the memblock index stored in an RxD block
1992  */
1993 static inline u32
1994 __vxge_hw_ring_block_memblock_idx(u8 *block)
1995 {
1996 	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
1997 }
1998 
1999 /*
2000  * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
2001  * This function sets index to a memory block
2002  */
2003 static inline void
2004 __vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
2005 {
2006 	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
2007 }
2008 
2009 /*
2010  * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
2011  * in RxD block
2012  * Sets the next block pointer in RxD block
2013  */
2014 static inline void
2015 __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
2016 {
2017 	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
2018 }
2019 
2020 /*
2021  * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
2022  *             first block
2023  * Returns the dma address of the first RxD block
2024  */
2025 static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
2026 {
2027 	struct vxge_hw_mempool_dma *dma_object;
2028 
2029 	dma_object = ring->mempool->memblocks_dma_arr;
2030 	vxge_assert(dma_object != NULL);
2031 
2032 	return dma_object->addr;
2033 }
2034 
2035 /*
2036  * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
2037  * This function returns the dma address of a given item
2038  */
2039 static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
2040 					       void *item)
2041 {
2042 	u32 memblock_idx;
2043 	void *memblock;
2044 	struct vxge_hw_mempool_dma *memblock_dma_object;
2045 	ptrdiff_t dma_item_offset;
2046 
2047 	/* get owner memblock index */
2048 	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
2049 
2050 	/* get owner memblock by memblock index */
2051 	memblock = mempoolh->memblocks_arr[memblock_idx];
2052 
2053 	/* get memblock DMA object by memblock index */
2054 	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
2055 
2056 	/* calculate offset in the memblock of this item */
2057 	dma_item_offset = (u8 *)item - (u8 *)memblock;
2058 
2059 	return memblock_dma_object->addr + dma_item_offset;
2060 }
2061 
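/* Worked example (illustrative, not from the original source): if the owning
 * memblock was mapped at DMA address 0x10000000 and the item sits 0x200 bytes
 * into that memblock, the function returns 0x10000200 -- the item's offset
 * within the memblock added to the memblock's DMA base address.
 */
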
2062 /*
2063  * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
2064  * This function links the "from" RxD block to the "to" RxD block
2065  */
2066 static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
2067 					 struct __vxge_hw_ring *ring, u32 from,
2068 					 u32 to)
2069 {
2070 	u8 *to_item, *from_item;
2071 	dma_addr_t to_dma;
2072 
2073 	/* get "from" RxD block */
2074 	from_item = mempoolh->items_arr[from];
2075 	vxge_assert(from_item);
2076 
2077 	/* get "to" RxD block */
2078 	to_item = mempoolh->items_arr[to];
2079 	vxge_assert(to_item);
2080 
2081 	/* get the DMA address of the beginning of the "to" RxD block */
2082 	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
2083 
2084 	/* point the "from" RxD block's next pointer at the "to"
2085 	 * block's DMA start address */
2086 	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
2087 }
2088 
2089 /*
2090  * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
2091  * block callback
2092  * This function is the callback passed to __vxge_hw_mempool_create to create
2093  * the memory pool for RxD blocks
2094  */
2095 static void
2096 __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
2097 				  u32 memblock_index,
2098 				  struct vxge_hw_mempool_dma *dma_object,
2099 				  u32 index, u32 is_last)
2100 {
2101 	u32 i;
2102 	void *item = mempoolh->items_arr[index];
2103 	struct __vxge_hw_ring *ring =
2104 		(struct __vxge_hw_ring *)mempoolh->userdata;
2105 
2106 	/* format rxds array */
2107 	for (i = 0; i < ring->rxds_per_block; i++) {
2108 		void *rxdblock_priv;
2109 		void *uld_priv;
2110 		struct vxge_hw_ring_rxd_1 *rxdp;
2111 
2112 		u32 reserve_index = ring->channel.reserve_ptr -
2113 				(index * ring->rxds_per_block + i + 1);
2114 		u32 memblock_item_idx;
2115 
2116 		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
2117 						i * ring->rxd_size;
2118 
2119 		/* Note: memblock_item_idx is index of the item within
2120 		 *       the memblock. For instance, in case of three RxD-blocks
2121 		 *       per memblock this value can be 0, 1 or 2. */
2122 		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
2123 					memblock_index, item,
2124 					&memblock_item_idx);
2125 
2126 		rxdp = ring->channel.reserve_arr[reserve_index];
2127 
2128 		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
2129 
2130 		/* pre-format Host_Control */
2131 		rxdp->host_control = (u64)(size_t)uld_priv;
2132 	}
2133 
2134 	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
2135 
2136 	if (is_last) {
2137 		/* link last one with first one */
2138 		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
2139 	}
2140 
2141 	if (index > 0) {
2142 		/* link this RxD block with previous one */
2143 		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
2144 	}
2145 }
2146 
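/* Layout note (illustrative, not from the original source): the two
 * __vxge_hw_ring_rxdblock_link() calls above chain the RxD blocks so that
 * block n's next pointer holds block n+1's DMA address and the last block
 * wraps back to block 0, giving the adapter a circular list of RxD blocks
 * to walk without further host intervention.
 */
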
2147 /*
2148  * vxge_hw_ring_replenish - Initial replenish of RxDs
2149  * This function replenishes the RxDs from the reserve array to the work array
2150  */
2151 enum vxge_hw_status
2152 vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
2153 {
2154 	void *rxd;
2155 	struct __vxge_hw_channel *channel;
2156 	enum vxge_hw_status status = VXGE_HW_OK;
2157 
2158 	channel = &ring->channel;
2159 
2160 	while (vxge_hw_channel_dtr_count(channel) > 0) {
2161 
2162 		status = vxge_hw_ring_rxd_reserve(ring, &rxd);
2163 
2164 		vxge_assert(status == VXGE_HW_OK);
2165 
2166 		if (ring->rxd_init) {
2167 			status = ring->rxd_init(rxd, channel->userdata);
2168 			if (status != VXGE_HW_OK) {
2169 				vxge_hw_ring_rxd_free(ring, rxd);
2170 				goto exit;
2171 			}
2172 		}
2173 
2174 		vxge_hw_ring_rxd_post(ring, rxd);
2175 	}
2176 	status = VXGE_HW_OK;
2177 exit:
2178 	return status;
2179 }
2180 
2181 /*
2182  * __vxge_hw_channel_allocate - Allocate memory for channel
2183  * This function allocates required memory for the channel and various arrays
2184  * in the channel
2185  */
2186 static struct __vxge_hw_channel *
2187 __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
2188 			   enum __vxge_hw_channel_type type,
2189 			   u32 length, u32 per_dtr_space,
2190 			   void *userdata)
2191 {
2192 	struct __vxge_hw_channel *channel;
2193 	struct __vxge_hw_device *hldev;
2194 	int size = 0;
2195 	u32 vp_id;
2196 
2197 	hldev = vph->vpath->hldev;
2198 	vp_id = vph->vpath->vp_id;
2199 
2200 	switch (type) {
2201 	case VXGE_HW_CHANNEL_TYPE_FIFO:
2202 		size = sizeof(struct __vxge_hw_fifo);
2203 		break;
2204 	case VXGE_HW_CHANNEL_TYPE_RING:
2205 		size = sizeof(struct __vxge_hw_ring);
2206 		break;
2207 	default:
2208 		break;
2209 	}
2210 
2211 	channel = kzalloc(size, GFP_KERNEL);
2212 	if (channel == NULL)
2213 		goto exit0;
2214 	INIT_LIST_HEAD(&channel->item);
2215 
2216 	channel->common_reg = hldev->common_reg;
2217 	channel->first_vp_id = hldev->first_vp_id;
2218 	channel->type = type;
2219 	channel->devh = hldev;
2220 	channel->vph = vph;
2221 	channel->userdata = userdata;
2222 	channel->per_dtr_space = per_dtr_space;
2223 	channel->length = length;
2224 	channel->vp_id = vp_id;
2225 
2226 	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2227 	if (channel->work_arr == NULL)
2228 		goto exit1;
2229 
2230 	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2231 	if (channel->free_arr == NULL)
2232 		goto exit1;
2233 	channel->free_ptr = length;
2234 
2235 	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2236 	if (channel->reserve_arr == NULL)
2237 		goto exit1;
2238 	channel->reserve_ptr = length;
2239 	channel->reserve_top = 0;
2240 
2241 	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2242 	if (channel->orig_arr == NULL)
2243 		goto exit1;
2244 
2245 	return channel;
2246 exit1:
2247 	__vxge_hw_channel_free(channel);
2248 
2249 exit0:
2250 	return NULL;
2251 }
2252 
2253 /*
2254  * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
2255  * Adds a block to the block pool
2256  */
2257 static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
2258 					void *block_addr,
2259 					u32 length,
2260 					struct pci_dev *dma_h,
2261 					struct pci_dev *acc_handle)
2262 {
2263 	struct __vxge_hw_blockpool *blockpool;
2264 	struct __vxge_hw_blockpool_entry *entry = NULL;
2265 	dma_addr_t dma_addr;
2266 	enum vxge_hw_status status = VXGE_HW_OK;
2267 	u32 req_out;
2268 
2269 	blockpool = &devh->block_pool;
2270 
2271 	if (block_addr == NULL) {
2272 		blockpool->req_out--;
2273 		status = VXGE_HW_FAIL;
2274 		goto exit;
2275 	}
2276 
2277 	dma_addr = pci_map_single(devh->pdev, block_addr, length,
2278 				PCI_DMA_BIDIRECTIONAL);
2279 
2280 	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
2281 		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
2282 		blockpool->req_out--;
2283 		status = VXGE_HW_FAIL;
2284 		goto exit;
2285 	}
2286 
2287 	if (!list_empty(&blockpool->free_entry_list))
2288 		entry = (struct __vxge_hw_blockpool_entry *)
2289 			list_first_entry(&blockpool->free_entry_list,
2290 				struct __vxge_hw_blockpool_entry,
2291 				item);
2292 
2293 	if (entry == NULL)
2294 		entry =	vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
2295 	else
2296 		list_del(&entry->item);
2297 
2298 	if (entry != NULL) {
2299 		entry->length = length;
2300 		entry->memblock = block_addr;
2301 		entry->dma_addr = dma_addr;
2302 		entry->acc_handle = acc_handle;
2303 		entry->dma_handle = dma_h;
2304 		list_add(&entry->item, &blockpool->free_block_list);
2305 		blockpool->pool_size++;
2306 		status = VXGE_HW_OK;
2307 	} else
2308 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2309 
2310 	blockpool->req_out--;
2311 
2312 	req_out = blockpool->req_out;
2313 exit:
2314 	return;
2315 }
2316 
2317 static inline void
2318 vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
2319 {
2320 	gfp_t flags;
2321 	void *vaddr;
2322 
2323 	if (in_interrupt())
2324 		flags = GFP_ATOMIC | GFP_DMA;
2325 	else
2326 		flags = GFP_KERNEL | GFP_DMA;
2327 
2328 	vaddr = kmalloc(size, flags);
2329 
2330 	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
2331 }
2332 
2333 /*
2334  * __vxge_hw_blockpool_blocks_add - Request additional blocks
2335  */
2336 static
2337 void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
2338 {
2339 	u32 nreq = 0, i;
2340 
2341 	if ((blockpool->pool_size  +  blockpool->req_out) <
2342 		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
2343 		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
2344 		blockpool->req_out += nreq;
2345 	}
2346 
2347 	for (i = 0; i < nreq; i++)
2348 		vxge_os_dma_malloc_async(
2349 			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2350 			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
2351 }
2352 
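/* Sizing note (illustrative, not from the original source): if the pool holds
 * 2 blocks with 1 request already outstanding and
 * VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE is, say, 16, the sum (3) is below the
 * minimum, so another VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE asynchronous
 * allocations are queued; otherwise nothing is requested.
 */
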
2353 /*
2354  * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
2355  * Allocates a block of memory of given size, either from block pool
2356  * or by calling vxge_os_dma_malloc()
2357  */
2358 static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
2359 					struct vxge_hw_mempool_dma *dma_object)
2360 {
2361 	struct __vxge_hw_blockpool_entry *entry = NULL;
2362 	struct __vxge_hw_blockpool  *blockpool;
2363 	void *memblock = NULL;
2364 	enum vxge_hw_status status = VXGE_HW_OK;
2365 
2366 	blockpool = &devh->block_pool;
2367 
2368 	if (size != blockpool->block_size) {
2369 
2370 		memblock = vxge_os_dma_malloc(devh->pdev, size,
2371 						&dma_object->handle,
2372 						&dma_object->acc_handle);
2373 
2374 		if (memblock == NULL) {
2375 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
2376 			goto exit;
2377 		}
2378 
2379 		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
2380 					PCI_DMA_BIDIRECTIONAL);
2381 
2382 		if (unlikely(pci_dma_mapping_error(devh->pdev,
2383 				dma_object->addr))) {
2384 			vxge_os_dma_free(devh->pdev, memblock,
2385 				&dma_object->acc_handle);
2386 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
2387 			goto exit;
2388 		}
2389 
2390 	} else {
2391 
2392 		if (!list_empty(&blockpool->free_block_list))
2393 			entry = (struct __vxge_hw_blockpool_entry *)
2394 				list_first_entry(&blockpool->free_block_list,
2395 					struct __vxge_hw_blockpool_entry,
2396 					item);
2397 
2398 		if (entry != NULL) {
2399 			list_del(&entry->item);
2400 			dma_object->addr = entry->dma_addr;
2401 			dma_object->handle = entry->dma_handle;
2402 			dma_object->acc_handle = entry->acc_handle;
2403 			memblock = entry->memblock;
2404 
2405 			list_add(&entry->item,
2406 				&blockpool->free_entry_list);
2407 			blockpool->pool_size--;
2408 		}
2409 
2410 		if (memblock != NULL)
2411 			__vxge_hw_blockpool_blocks_add(blockpool);
2412 	}
2413 exit:
2414 	return memblock;
2415 }
2416 
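/* Behaviour note (illustrative, not from the original source): requests that
 * match the pool's block_size are served from free_block_list and, when a
 * block is handed out, __vxge_hw_blockpool_blocks_add() is called to top the
 * pool back up. Any other size bypasses the pool and is allocated and
 * DMA-mapped directly, to be released later via __vxge_hw_blockpool_free().
 */
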
2417 /*
2418  * __vxge_hw_blockpool_blocks_remove - Free additional blocks
2419  */
2420 static void
2421 __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
2422 {
2423 	struct list_head *p, *n;
2424 
2425 	list_for_each_safe(p, n, &blockpool->free_block_list) {
2426 
2427 		if (blockpool->pool_size < blockpool->pool_max)
2428 			break;
2429 
2430 		pci_unmap_single(
2431 			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2432 			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
2433 			((struct __vxge_hw_blockpool_entry *)p)->length,
2434 			PCI_DMA_BIDIRECTIONAL);
2435 
2436 		vxge_os_dma_free(
2437 			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2438 			((struct __vxge_hw_blockpool_entry *)p)->memblock,
2439 			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
2440 
2441 		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
2442 
2443 		list_add(p, &blockpool->free_entry_list);
2444 
2445 		blockpool->pool_size--;
2446 
2447 	}
2448 }
2449 
2450 /*
2451  * __vxge_hw_blockpool_free - Frees the memory allocated with
2452  *				__vxge_hw_blockpool_malloc
2453  */
2454 static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
2455 				     void *memblock, u32 size,
2456 				     struct vxge_hw_mempool_dma *dma_object)
2457 {
2458 	struct __vxge_hw_blockpool_entry *entry = NULL;
2459 	struct __vxge_hw_blockpool  *blockpool;
2460 	enum vxge_hw_status status = VXGE_HW_OK;
2461 
2462 	blockpool = &devh->block_pool;
2463 
2464 	if (size != blockpool->block_size) {
2465 		pci_unmap_single(devh->pdev, dma_object->addr, size,
2466 			PCI_DMA_BIDIRECTIONAL);
2467 		vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
2468 	} else {
2469 
2470 		if (!list_empty(&blockpool->free_entry_list))
2471 			entry = (struct __vxge_hw_blockpool_entry *)
2472 				list_first_entry(&blockpool->free_entry_list,
2473 					struct __vxge_hw_blockpool_entry,
2474 					item);
2475 
2476 		if (entry == NULL)
2477 			entry =	vmalloc(sizeof(
2478 					struct __vxge_hw_blockpool_entry));
2479 		else
2480 			list_del(&entry->item);
2481 
2482 		if (entry != NULL) {
2483 			entry->length = size;
2484 			entry->memblock = memblock;
2485 			entry->dma_addr = dma_object->addr;
2486 			entry->acc_handle = dma_object->acc_handle;
2487 			entry->dma_handle = dma_object->handle;
2488 			list_add(&entry->item,
2489 					&blockpool->free_block_list);
2490 			blockpool->pool_size++;
2491 			status = VXGE_HW_OK;
2492 		} else
2493 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
2494 
2495 		if (status == VXGE_HW_OK)
2496 			__vxge_hw_blockpool_blocks_remove(blockpool);
2497 	}
2498 }
2499 
2500 /*
2501  * __vxge_hw_mempool_destroy - Free the memblocks and the mempool itself
2502  */
2503 static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
2504 {
2505 	u32 i, j;
2506 	struct __vxge_hw_device *devh = mempool->devh;
2507 
2508 	for (i = 0; i < mempool->memblocks_allocated; i++) {
2509 		struct vxge_hw_mempool_dma *dma_object;
2510 
2511 		vxge_assert(mempool->memblocks_arr[i]);
2512 		vxge_assert(mempool->memblocks_dma_arr + i);
2513 
2514 		dma_object = mempool->memblocks_dma_arr + i;
2515 
2516 		for (j = 0; j < mempool->items_per_memblock; j++) {
2517 			u32 index = i * mempool->items_per_memblock + j;
2518 
2519 			/* to skip the last (possibly partially filled) memblock */
2520 			if (index >= mempool->items_current)
2521 				break;
2522 		}
2523 
2524 		vfree(mempool->memblocks_priv_arr[i]);
2525 
2526 		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
2527 				mempool->memblock_size, dma_object);
2528 	}
2529 
2530 	vfree(mempool->items_arr);
2531 	vfree(mempool->memblocks_dma_arr);
2532 	vfree(mempool->memblocks_priv_arr);
2533 	vfree(mempool->memblocks_arr);
2534 	vfree(mempool);
2535 }
2536 
2537 /*
2538  * __vxge_hw_mempool_grow
2539  * Grows the mempool by up to %num_allocate memblocks.
2540  */
2541 static enum vxge_hw_status
2542 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
2543 		       u32 *num_allocated)
2544 {
2545 	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
2546 	u32 n_items = mempool->items_per_memblock;
2547 	u32 start_block_idx = mempool->memblocks_allocated;
2548 	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
2549 	enum vxge_hw_status status = VXGE_HW_OK;
2550 
2551 	*num_allocated = 0;
2552 
2553 	if (end_block_idx > mempool->memblocks_max) {
2554 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2555 		goto exit;
2556 	}
2557 
2558 	for (i = start_block_idx; i < end_block_idx; i++) {
2559 		u32 j;
2560 		u32 is_last = ((end_block_idx - 1) == i);
2561 		struct vxge_hw_mempool_dma *dma_object =
2562 			mempool->memblocks_dma_arr + i;
2563 		void *the_memblock;
2564 
2565 		/* allocate the memblock's private part. Each DMA memblock
2566 		 * has space set aside for the items' private use, as
2567 		 * requested by the mempool's user. Each time the mempool
2568 		 * grows, it allocates a new memblock and its private part
2569 		 * together, which helps to minimize memory usage. */
2570 		mempool->memblocks_priv_arr[i] =
2571 				vzalloc(mempool->items_priv_size * n_items);
2572 		if (mempool->memblocks_priv_arr[i] == NULL) {
2573 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
2574 			goto exit;
2575 		}
2576 
2577 		/* allocate DMA-capable memblock */
2578 		mempool->memblocks_arr[i] =
2579 			__vxge_hw_blockpool_malloc(mempool->devh,
2580 				mempool->memblock_size, dma_object);
2581 		if (mempool->memblocks_arr[i] == NULL) {
2582 			vfree(mempool->memblocks_priv_arr[i]);
2583 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
2584 			goto exit;
2585 		}
2586 
2587 		(*num_allocated)++;
2588 		mempool->memblocks_allocated++;
2589 
2590 		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
2591 
2592 		the_memblock = mempool->memblocks_arr[i];
2593 
2594 		/* fill the items hash array */
2595 		for (j = 0; j < n_items; j++) {
2596 			u32 index = i * n_items + j;
2597 
2598 			if (first_time && index >= mempool->items_initial)
2599 				break;
2600 
2601 			mempool->items_arr[index] =
2602 				((char *)the_memblock + j*mempool->item_size);
2603 
2604 			/* let the caller do more work on each item */
2605 			if (mempool->item_func_alloc != NULL)
2606 				mempool->item_func_alloc(mempool, i,
2607 					dma_object, index, is_last);
2608 
2609 			mempool->items_current = index + 1;
2610 		}
2611 
2612 		if (first_time && mempool->items_current ==
2613 					mempool->items_initial)
2614 			break;
2615 	}
2616 exit:
2617 	return status;
2618 }
2619 
2620 /*
2621  * __vxge_hw_mempool_create
2622  * This function creates a memory pool object. The pool may grow but will
2623  * never shrink. The pool consists of a number of dynamically allocated
2624  * blocks, large enough to hold %items_initial items. Memory is DMA-able,
2625  * but the client must map/unmap it before interoperating with the device.
2626  */
2627 static struct vxge_hw_mempool *
2628 __vxge_hw_mempool_create(struct __vxge_hw_device *devh,
2629 			 u32 memblock_size,
2630 			 u32 item_size,
2631 			 u32 items_priv_size,
2632 			 u32 items_initial,
2633 			 u32 items_max,
2634 			 const struct vxge_hw_mempool_cbs *mp_callback,
2635 			 void *userdata)
2636 {
2637 	enum vxge_hw_status status = VXGE_HW_OK;
2638 	u32 memblocks_to_allocate;
2639 	struct vxge_hw_mempool *mempool = NULL;
2640 	u32 allocated;
2641 
2642 	if (memblock_size < item_size) {
2643 		status = VXGE_HW_FAIL;
2644 		goto exit;
2645 	}
2646 
2647 	mempool = vzalloc(sizeof(struct vxge_hw_mempool));
2648 	if (mempool == NULL) {
2649 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2650 		goto exit;
2651 	}
2652 
2653 	mempool->devh			= devh;
2654 	mempool->memblock_size		= memblock_size;
2655 	mempool->items_max		= items_max;
2656 	mempool->items_initial		= items_initial;
2657 	mempool->item_size		= item_size;
2658 	mempool->items_priv_size	= items_priv_size;
2659 	mempool->item_func_alloc	= mp_callback->item_func_alloc;
2660 	mempool->userdata		= userdata;
2661 
2662 	mempool->memblocks_allocated = 0;
2663 
2664 	mempool->items_per_memblock = memblock_size / item_size;
2665 
2666 	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
2667 					mempool->items_per_memblock;
2668 
2669 	/* allocate array of memblocks */
2670 	mempool->memblocks_arr =
2671 		vzalloc(sizeof(void *) * mempool->memblocks_max);
2672 	if (mempool->memblocks_arr == NULL) {
2673 		__vxge_hw_mempool_destroy(mempool);
2674 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2675 		mempool = NULL;
2676 		goto exit;
2677 	}
2678 
2679 	/* allocate array of private parts of items per memblocks */
2680 	mempool->memblocks_priv_arr =
2681 		vzalloc(sizeof(void *) * mempool->memblocks_max);
2682 	if (mempool->memblocks_priv_arr == NULL) {
2683 		__vxge_hw_mempool_destroy(mempool);
2684 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2685 		mempool = NULL;
2686 		goto exit;
2687 	}
2688 
2689 	/* allocate array of memblocks DMA objects */
2690 	mempool->memblocks_dma_arr =
2691 		vzalloc(sizeof(struct vxge_hw_mempool_dma) *
2692 			mempool->memblocks_max);
2693 	if (mempool->memblocks_dma_arr == NULL) {
2694 		__vxge_hw_mempool_destroy(mempool);
2695 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2696 		mempool = NULL;
2697 		goto exit;
2698 	}
2699 
2700 	/* allocate hash array of items */
2701 	mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
2702 	if (mempool->items_arr == NULL) {
2703 		__vxge_hw_mempool_destroy(mempool);
2704 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2705 		mempool = NULL;
2706 		goto exit;
2707 	}
2708 
2709 	/* calculate initial number of memblocks */
2710 	memblocks_to_allocate = (mempool->items_initial +
2711 				 mempool->items_per_memblock - 1) /
2712 						mempool->items_per_memblock;
2713 
2714 	/* pre-allocate the mempool */
2715 	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
2716 					&allocated);
2717 	if (status != VXGE_HW_OK) {
2718 		__vxge_hw_mempool_destroy(mempool);
2719 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2720 		mempool = NULL;
2721 		goto exit;
2722 	}
2723 
2724 exit:
2725 	return mempool;
2726 }
2727 
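/* Worked example (illustrative, not from the original source): with
 * items_max == 10 and items_per_memblock == 4, memblocks_max is computed as
 * (10 + 4 - 1) / 4 == 3, i.e. enough whole memblocks to cover a final,
 * partially filled block as well.
 */
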
2728 /*
2729  * __vxge_hw_ring_abort - Returns the outstanding RxDs
2730  * This function terminates the RxDs of the ring
2731  */
2732 static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
2733 {
2734 	void *rxdh;
2735 	struct __vxge_hw_channel *channel;
2736 
2737 	channel = &ring->channel;
2738 
2739 	for (;;) {
2740 		vxge_hw_channel_dtr_try_complete(channel, &rxdh);
2741 
2742 		if (rxdh == NULL)
2743 			break;
2744 
2745 		vxge_hw_channel_dtr_complete(channel);
2746 
2747 		if (ring->rxd_term)
2748 			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
2749 				channel->userdata);
2750 
2751 		vxge_hw_channel_dtr_free(channel, rxdh);
2752 	}
2753 
2754 	return VXGE_HW_OK;
2755 }
2756 
2757 /*
2758  * __vxge_hw_ring_reset - Resets the ring
2759  * This function resets the ring during vpath reset operation
2760  */
2761 static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
2762 {
2763 	enum vxge_hw_status status = VXGE_HW_OK;
2764 	struct __vxge_hw_channel *channel;
2765 
2766 	channel = &ring->channel;
2767 
2768 	__vxge_hw_ring_abort(ring);
2769 
2770 	status = __vxge_hw_channel_reset(channel);
2771 
2772 	if (status != VXGE_HW_OK)
2773 		goto exit;
2774 
2775 	if (ring->rxd_init) {
2776 		status = vxge_hw_ring_replenish(ring);
2777 		if (status != VXGE_HW_OK)
2778 			goto exit;
2779 	}
2780 exit:
2781 	return status;
2782 }
2783 
2784 /*
2785  * __vxge_hw_ring_delete - Removes the ring
2787  * This function frees up the memory pool and removes the ring
2787  */
2788 static enum vxge_hw_status
2789 __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
2790 {
2791 	struct __vxge_hw_ring *ring = vp->vpath->ringh;
2792 
2793 	__vxge_hw_ring_abort(ring);
2794 
2795 	if (ring->mempool)
2796 		__vxge_hw_mempool_destroy(ring->mempool);
2797 
2798 	vp->vpath->ringh = NULL;
2799 	__vxge_hw_channel_free(&ring->channel);
2800 
2801 	return VXGE_HW_OK;
2802 }
2803 
2804 /*
2805  * __vxge_hw_ring_create - Create a Ring
2806  * This function creates a Ring and initializes it.
2807  */
2808 static enum vxge_hw_status
2809 __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
2810 		      struct vxge_hw_ring_attr *attr)
2811 {
2812 	enum vxge_hw_status status = VXGE_HW_OK;
2813 	struct __vxge_hw_ring *ring;
2814 	u32 ring_length;
2815 	struct vxge_hw_ring_config *config;
2816 	struct __vxge_hw_device *hldev;
2817 	u32 vp_id;
2818 	static const struct vxge_hw_mempool_cbs ring_mp_callback = {
2819 		.item_func_alloc = __vxge_hw_ring_mempool_item_alloc,
2820 	};
2821 
2822 	if ((vp == NULL) || (attr == NULL)) {
2823 		status = VXGE_HW_FAIL;
2824 		goto exit;
2825 	}
2826 
2827 	hldev = vp->vpath->hldev;
2828 	vp_id = vp->vpath->vp_id;
2829 
2830 	config = &hldev->config.vp_config[vp_id].ring;
2831 
2832 	ring_length = config->ring_blocks *
2833 			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
2834 
2835 	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
2836 						VXGE_HW_CHANNEL_TYPE_RING,
2837 						ring_length,
2838 						attr->per_rxd_space,
2839 						attr->userdata);
2840 	if (ring == NULL) {
2841 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2842 		goto exit;
2843 	}
2844 
2845 	vp->vpath->ringh = ring;
2846 	ring->vp_id = vp_id;
2847 	ring->vp_reg = vp->vpath->vp_reg;
2848 	ring->common_reg = hldev->common_reg;
2849 	ring->stats = &vp->vpath->sw_stats->ring_stats;
2850 	ring->config = config;
2851 	ring->callback = attr->callback;
2852 	ring->rxd_init = attr->rxd_init;
2853 	ring->rxd_term = attr->rxd_term;
2854 	ring->buffer_mode = config->buffer_mode;
2855 	ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
2856 	ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
2857 	ring->rxds_limit = config->rxds_limit;
2858 
2859 	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
2860 	ring->rxd_priv_size =
2861 		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
2862 	ring->per_rxd_space = attr->per_rxd_space;
2863 
2864 	ring->rxd_priv_size =
2865 		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
2866 		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
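	/* Illustrative note (not from the original source): with a 64-byte
	 * VXGE_CACHE_LINE_SIZE and an rxd_priv_size of, say, 200 bytes, the
	 * rounding above yields ((200 + 63) / 64) * 64 == 256, so each RxD's
	 * private area starts on its own cache line.
	 */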
2867 
2868 	/* how many RxDs can fit into one block. Depends on configured
2869 	 * buffer_mode. */
2870 	ring->rxds_per_block =
2871 		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
2872 
2873 	/* calculate actual RxD block private size */
2874 	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
2875 	ring->mempool = __vxge_hw_mempool_create(hldev,
2876 				VXGE_HW_BLOCK_SIZE,
2877 				VXGE_HW_BLOCK_SIZE,
2878 				ring->rxdblock_priv_size,
2879 				ring->config->ring_blocks,
2880 				ring->config->ring_blocks,
2881 				&ring_mp_callback,
2882 				ring);
2883 	if (ring->mempool == NULL) {
2884 		__vxge_hw_ring_delete(vp);
2885 		return VXGE_HW_ERR_OUT_OF_MEMORY;
2886 	}
2887 
2888 	status = __vxge_hw_channel_initialize(&ring->channel);
2889 	if (status != VXGE_HW_OK) {
2890 		__vxge_hw_ring_delete(vp);
2891 		goto exit;
2892 	}
2893 
2894 	/* Note:
2895 	 * Specifying rxd_init callback means two things:
2896 	 * 1) rxds need to be initialized by driver at channel-open time;
2897 	 * 2) rxds need to be posted at channel-open time
2898 	 *    (that's what the initial_replenish() below does)
2899 	 * Currently we don't have a case when the 1) is done without the 2).
2900 	 */
2901 	if (ring->rxd_init) {
2902 		status = vxge_hw_ring_replenish(ring);
2903 		if (status != VXGE_HW_OK) {
2904 			__vxge_hw_ring_delete(vp);
2905 			goto exit;
2906 		}
2907 	}
2908 
2909 	/* initial replenish will increment the counter in its post() routine,
2910 	 * we have to reset it */
2911 	ring->stats->common_stats.usage_cnt = 0;
2912 exit:
2913 	return status;
2914 }
2915 
2916 /*
2917  * vxge_hw_device_config_default_get - Initialize device config with defaults.
2918  * Initialize Titan device config with default values.
2919  */
2920 enum vxge_hw_status __devinit
2921 vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2922 {
2923 	u32 i;
2924 
2925 	device_config->dma_blockpool_initial =
2926 					VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
2927 	device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
2928 	device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
2929 	device_config->rth_en = VXGE_HW_RTH_DEFAULT;
2930 	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
2931 	device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
2932 	device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
2933 
2934 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2935 		device_config->vp_config[i].vp_id = i;
2936 
2937 		device_config->vp_config[i].min_bandwidth =
2938 				VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
2939 
2940 		device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
2941 
2942 		device_config->vp_config[i].ring.ring_blocks =
2943 				VXGE_HW_DEF_RING_BLOCKS;
2944 
2945 		device_config->vp_config[i].ring.buffer_mode =
2946 				VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
2947 
2948 		device_config->vp_config[i].ring.scatter_mode =
2949 				VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
2950 
2951 		device_config->vp_config[i].ring.rxds_limit =
2952 				VXGE_HW_DEF_RING_RXDS_LIMIT;
2953 
2954 		device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
2955 
2956 		device_config->vp_config[i].fifo.fifo_blocks =
2957 				VXGE_HW_MIN_FIFO_BLOCKS;
2958 
2959 		device_config->vp_config[i].fifo.max_frags =
2960 				VXGE_HW_MAX_FIFO_FRAGS;
2961 
2962 		device_config->vp_config[i].fifo.memblock_size =
2963 				VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
2964 
2965 		device_config->vp_config[i].fifo.alignment_size =
2966 				VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
2967 
2968 		device_config->vp_config[i].fifo.intr =
2969 				VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
2970 
2971 		device_config->vp_config[i].fifo.no_snoop_bits =
2972 				VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
2973 		device_config->vp_config[i].tti.intr_enable =
2974 				VXGE_HW_TIM_INTR_DEFAULT;
2975 
2976 		device_config->vp_config[i].tti.btimer_val =
2977 				VXGE_HW_USE_FLASH_DEFAULT;
2978 
2979 		device_config->vp_config[i].tti.timer_ac_en =
2980 				VXGE_HW_USE_FLASH_DEFAULT;
2981 
2982 		device_config->vp_config[i].tti.timer_ci_en =
2983 				VXGE_HW_USE_FLASH_DEFAULT;
2984 
2985 		device_config->vp_config[i].tti.timer_ri_en =
2986 				VXGE_HW_USE_FLASH_DEFAULT;
2987 
2988 		device_config->vp_config[i].tti.rtimer_val =
2989 				VXGE_HW_USE_FLASH_DEFAULT;
2990 
2991 		device_config->vp_config[i].tti.util_sel =
2992 				VXGE_HW_USE_FLASH_DEFAULT;
2993 
2994 		device_config->vp_config[i].tti.ltimer_val =
2995 				VXGE_HW_USE_FLASH_DEFAULT;
2996 
2997 		device_config->vp_config[i].tti.urange_a =
2998 				VXGE_HW_USE_FLASH_DEFAULT;
2999 
3000 		device_config->vp_config[i].tti.uec_a =
3001 				VXGE_HW_USE_FLASH_DEFAULT;
3002 
3003 		device_config->vp_config[i].tti.urange_b =
3004 				VXGE_HW_USE_FLASH_DEFAULT;
3005 
3006 		device_config->vp_config[i].tti.uec_b =
3007 				VXGE_HW_USE_FLASH_DEFAULT;
3008 
3009 		device_config->vp_config[i].tti.urange_c =
3010 				VXGE_HW_USE_FLASH_DEFAULT;
3011 
3012 		device_config->vp_config[i].tti.uec_c =
3013 				VXGE_HW_USE_FLASH_DEFAULT;
3014 
3015 		device_config->vp_config[i].tti.uec_d =
3016 				VXGE_HW_USE_FLASH_DEFAULT;
3017 
3018 		device_config->vp_config[i].rti.intr_enable =
3019 				VXGE_HW_TIM_INTR_DEFAULT;
3020 
3021 		device_config->vp_config[i].rti.btimer_val =
3022 				VXGE_HW_USE_FLASH_DEFAULT;
3023 
3024 		device_config->vp_config[i].rti.timer_ac_en =
3025 				VXGE_HW_USE_FLASH_DEFAULT;
3026 
3027 		device_config->vp_config[i].rti.timer_ci_en =
3028 				VXGE_HW_USE_FLASH_DEFAULT;
3029 
3030 		device_config->vp_config[i].rti.timer_ri_en =
3031 				VXGE_HW_USE_FLASH_DEFAULT;
3032 
3033 		device_config->vp_config[i].rti.rtimer_val =
3034 				VXGE_HW_USE_FLASH_DEFAULT;
3035 
3036 		device_config->vp_config[i].rti.util_sel =
3037 				VXGE_HW_USE_FLASH_DEFAULT;
3038 
3039 		device_config->vp_config[i].rti.ltimer_val =
3040 				VXGE_HW_USE_FLASH_DEFAULT;
3041 
3042 		device_config->vp_config[i].rti.urange_a =
3043 				VXGE_HW_USE_FLASH_DEFAULT;
3044 
3045 		device_config->vp_config[i].rti.uec_a =
3046 				VXGE_HW_USE_FLASH_DEFAULT;
3047 
3048 		device_config->vp_config[i].rti.urange_b =
3049 				VXGE_HW_USE_FLASH_DEFAULT;
3050 
3051 		device_config->vp_config[i].rti.uec_b =
3052 				VXGE_HW_USE_FLASH_DEFAULT;
3053 
3054 		device_config->vp_config[i].rti.urange_c =
3055 				VXGE_HW_USE_FLASH_DEFAULT;
3056 
3057 		device_config->vp_config[i].rti.uec_c =
3058 				VXGE_HW_USE_FLASH_DEFAULT;
3059 
3060 		device_config->vp_config[i].rti.uec_d =
3061 				VXGE_HW_USE_FLASH_DEFAULT;
3062 
3063 		device_config->vp_config[i].mtu =
3064 				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
3065 
3066 		device_config->vp_config[i].rpa_strip_vlan_tag =
3067 			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
3068 	}
3069 
3070 	return VXGE_HW_OK;
3071 }
3072 
3073 /*
3074  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
3075  * Set the swapper bits appropriately for the vpath.
3076  */
3077 static enum vxge_hw_status
3078 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
3079 {
3080 #ifndef __BIG_ENDIAN
3081 	u64 val64;
3082 
3083 	val64 = readq(&vpath_reg->vpath_general_cfg1);
3084 	wmb();
3085 	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
3086 	writeq(val64, &vpath_reg->vpath_general_cfg1);
3087 	wmb();
3088 #endif
3089 	return VXGE_HW_OK;
3090 }
3091 
3092 /*
3093  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
3094  * Set the swapper bits appropriately for the vpath.
3095  */
3096 static enum vxge_hw_status
3097 __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
3098 			   struct vxge_hw_vpath_reg __iomem *vpath_reg)
3099 {
3100 	u64 val64;
3101 
3102 	val64 = readq(&legacy_reg->pifm_wr_swap_en);
3103 
3104 	if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
3105 		val64 = readq(&vpath_reg->kdfcctl_cfg0);
3106 		wmb();
3107 
3108 		val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0	|
3109 			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1	|
3110 			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
3111 
3112 		writeq(val64, &vpath_reg->kdfcctl_cfg0);
3113 		wmb();
3114 	}
3115 
3116 	return VXGE_HW_OK;
3117 }
3118 
3119 /*
3120  * vxge_hw_mgmt_reg_read - Read Titan register.
3121  */
3122 enum vxge_hw_status
3123 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
3124 		      enum vxge_hw_mgmt_reg_type type,
3125 		      u32 index, u32 offset, u64 *value)
3126 {
3127 	enum vxge_hw_status status = VXGE_HW_OK;
3128 
3129 	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
3130 		status = VXGE_HW_ERR_INVALID_DEVICE;
3131 		goto exit;
3132 	}
3133 
3134 	switch (type) {
3135 	case vxge_hw_mgmt_reg_type_legacy:
3136 		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
3137 			status = VXGE_HW_ERR_INVALID_OFFSET;
3138 			break;
3139 		}
3140 		*value = readq((void __iomem *)hldev->legacy_reg + offset);
3141 		break;
3142 	case vxge_hw_mgmt_reg_type_toc:
3143 		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
3144 			status = VXGE_HW_ERR_INVALID_OFFSET;
3145 			break;
3146 		}
3147 		*value = readq((void __iomem *)hldev->toc_reg + offset);
3148 		break;
3149 	case vxge_hw_mgmt_reg_type_common:
3150 		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
3151 			status = VXGE_HW_ERR_INVALID_OFFSET;
3152 			break;
3153 		}
3154 		*value = readq((void __iomem *)hldev->common_reg + offset);
3155 		break;
3156 	case vxge_hw_mgmt_reg_type_mrpcim:
3157 		if (!(hldev->access_rights &
3158 			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
3159 			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3160 			break;
3161 		}
3162 		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
3163 			status = VXGE_HW_ERR_INVALID_OFFSET;
3164 			break;
3165 		}
3166 		*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
3167 		break;
3168 	case vxge_hw_mgmt_reg_type_srpcim:
3169 		if (!(hldev->access_rights &
3170 			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
3171 			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3172 			break;
3173 		}
3174 		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
3175 			status = VXGE_HW_ERR_INVALID_INDEX;
3176 			break;
3177 		}
3178 		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
3179 			status = VXGE_HW_ERR_INVALID_OFFSET;
3180 			break;
3181 		}
3182 		*value = readq((void __iomem *)hldev->srpcim_reg[index] +
3183 				offset);
3184 		break;
3185 	case vxge_hw_mgmt_reg_type_vpmgmt:
3186 		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
3187 			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3188 			status = VXGE_HW_ERR_INVALID_INDEX;
3189 			break;
3190 		}
3191 		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
3192 			status = VXGE_HW_ERR_INVALID_OFFSET;
3193 			break;
3194 		}
3195 		*value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
3196 				offset);
3197 		break;
3198 	case vxge_hw_mgmt_reg_type_vpath:
3199 		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
3200 			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3201 			status = VXGE_HW_ERR_INVALID_INDEX;
3202 			break;
3203 		}
3204 		if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) {
3205 			status = VXGE_HW_ERR_INVALID_INDEX;
3206 			break;
3207 		}
3208 		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
3209 			status = VXGE_HW_ERR_INVALID_OFFSET;
3210 			break;
3211 		}
3212 		*value = readq((void __iomem *)hldev->vpath_reg[index] +
3213 				offset);
3214 		break;
3215 	default:
3216 		status = VXGE_HW_ERR_INVALID_TYPE;
3217 		break;
3218 	}
3219 
3220 exit:
3221 	return status;
3222 }
3223 
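/* Usage sketch (illustrative, not from the original source): reading the
 * first 64-bit word of vpath 0's register space through the management
 * interface could look like this:
 *
 *	u64 val;
 *
 *	if (vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_vpath,
 *				  0, 0, &val) == VXGE_HW_OK)
 *		pr_info("vpath 0 reg[0] = 0x%llx\n", (unsigned long long)val);
 */
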
3224 /*
3225  * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
3226  */
3227 enum vxge_hw_status
3228 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3229 {
3230 	struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
3231 	enum vxge_hw_status status = VXGE_HW_OK;
3232 	int i = 0, j = 0;
3233 
3234 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3235 		if (!((vpath_mask) & vxge_mBIT(i)))
3236 			continue;
3237 		vpmgmt_reg = hldev->vpmgmt_reg[i];
3238 		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
3239 			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
3240 			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
3241 				return VXGE_HW_FAIL;
3242 		}
3243 	}
3244 	return status;
3245 }
3246 /*
3247  * vxge_hw_mgmt_reg_write - Write Titan register.
3248  */
3249 enum vxge_hw_status
3250 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
3251 		      enum vxge_hw_mgmt_reg_type type,
3252 		      u32 index, u32 offset, u64 value)
3253 {
3254 	enum vxge_hw_status status = VXGE_HW_OK;
3255 
3256 	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
3257 		status = VXGE_HW_ERR_INVALID_DEVICE;
3258 		goto exit;
3259 	}
3260 
3261 	switch (type) {
3262 	case vxge_hw_mgmt_reg_type_legacy:
3263 		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
3264 			status = VXGE_HW_ERR_INVALID_OFFSET;
3265 			break;
3266 		}
3267 		writeq(value, (void __iomem *)hldev->legacy_reg + offset);
3268 		break;
3269 	case vxge_hw_mgmt_reg_type_toc:
3270 		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
3271 			status = VXGE_HW_ERR_INVALID_OFFSET;
3272 			break;
3273 		}
3274 		writeq(value, (void __iomem *)hldev->toc_reg + offset);
3275 		break;
3276 	case vxge_hw_mgmt_reg_type_common:
3277 		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
3278 			status = VXGE_HW_ERR_INVALID_OFFSET;
3279 			break;
3280 		}
3281 		writeq(value, (void __iomem *)hldev->common_reg + offset);
3282 		break;
3283 	case vxge_hw_mgmt_reg_type_mrpcim:
3284 		if (!(hldev->access_rights &
3285 			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
3286 			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3287 			break;
3288 		}
3289 		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
3290 			status = VXGE_HW_ERR_INVALID_OFFSET;
3291 			break;
3292 		}
3293 		writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
3294 		break;
3295 	case vxge_hw_mgmt_reg_type_srpcim:
3296 		if (!(hldev->access_rights &
3297 			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
3298 			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3299 			break;
3300 		}
3301 		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
3302 			status = VXGE_HW_ERR_INVALID_INDEX;
3303 			break;
3304 		}
3305 		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
3306 			status = VXGE_HW_ERR_INVALID_OFFSET;
3307 			break;
3308 		}
3309 		writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
3310 			offset);
3311 
3312 		break;
3313 	case vxge_hw_mgmt_reg_type_vpmgmt:
3314 		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
3315 			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3316 			status = VXGE_HW_ERR_INVALID_INDEX;
3317 			break;
3318 		}
3319 		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
3320 			status = VXGE_HW_ERR_INVALID_OFFSET;
3321 			break;
3322 		}
3323 		writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
3324 			offset);
3325 		break;
3326 	case vxge_hw_mgmt_reg_type_vpath:
3327 		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
3328 			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3329 			status = VXGE_HW_ERR_INVALID_INDEX;
3330 			break;
3331 		}
3332 		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
3333 			status = VXGE_HW_ERR_INVALID_OFFSET;
3334 			break;
3335 		}
3336 		writeq(value, (void __iomem *)hldev->vpath_reg[index] +
3337 			offset);
3338 		break;
3339 	default:
3340 		status = VXGE_HW_ERR_INVALID_TYPE;
3341 		break;
3342 	}
3343 exit:
3344 	return status;
3345 }
3346 
3347 /*
3348  * __vxge_hw_fifo_abort - Returns the outstanding TxDs
3349  * This function terminates the TxDs of the fifo
3350  */
3351 static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
3352 {
3353 	void *txdlh;
3354 
3355 	for (;;) {
3356 		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
3357 
3358 		if (txdlh == NULL)
3359 			break;
3360 
3361 		vxge_hw_channel_dtr_complete(&fifo->channel);
3362 
3363 		if (fifo->txdl_term) {
3364 			fifo->txdl_term(txdlh,
3365 			VXGE_HW_TXDL_STATE_POSTED,
3366 			fifo->channel.userdata);
3367 		}
3368 
3369 		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
3370 	}
3371 
3372 	return VXGE_HW_OK;
3373 }
3374 
3375 /*
3376  * __vxge_hw_fifo_reset - Resets the fifo
3377  * This function resets the fifo during vpath reset operation
3378  */
3379 static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
3380 {
3381 	enum vxge_hw_status status = VXGE_HW_OK;
3382 
3383 	__vxge_hw_fifo_abort(fifo);
3384 	status = __vxge_hw_channel_reset(&fifo->channel);
3385 
3386 	return status;
3387 }
3388 
3389 /*
3390  * __vxge_hw_fifo_delete - Removes the FIFO
3391  * This function frees up the memory pool and removes the FIFO
3392  */
3393 static enum vxge_hw_status
3394 __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
3395 {
3396 	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
3397 
3398 	__vxge_hw_fifo_abort(fifo);
3399 
3400 	if (fifo->mempool)
3401 		__vxge_hw_mempool_destroy(fifo->mempool);
3402 
3403 	vp->vpath->fifoh = NULL;
3404 
3405 	__vxge_hw_channel_free(&fifo->channel);
3406 
3407 	return VXGE_HW_OK;
3408 }
3409 
3410 /*
3411  * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
3412  * list callback
3413  * This function is the callback passed to __vxge_hw_mempool_create to
3414  * create the memory pool for the TxD list
3415  */
3416 static void
3417 __vxge_hw_fifo_mempool_item_alloc(
3418 	struct vxge_hw_mempool *mempoolh,
3419 	u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
3420 	u32 index, u32 is_last)
3421 {
3422 	u32 memblock_item_idx;
3423 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
3424 	struct vxge_hw_fifo_txd *txdp =
3425 		(struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
3426 	struct __vxge_hw_fifo *fifo =
3427 			(struct __vxge_hw_fifo *)mempoolh->userdata;
3428 	void *memblock = mempoolh->memblocks_arr[memblock_index];
3429 
3430 	vxge_assert(txdp);
3431 
3432 	txdp->host_control = (u64) (size_t)
3433 	__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
3434 					&memblock_item_idx);
3435 
3436 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
3437 
3438 	vxge_assert(txdl_priv);
3439 
3440 	fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
3441 
3442 	/* pre-format HW's TxDL's private */
3443 	txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
3444 	txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
3445 	txdl_priv->dma_handle = dma_object->handle;
3446 	txdl_priv->memblock   = memblock;
3447 	txdl_priv->first_txdp = txdp;
3448 	txdl_priv->next_txdl_priv = NULL;
3449 	txdl_priv->alloc_frags = 0;
3450 }
3451 
3452 /*
3453  * __vxge_hw_fifo_create - Create a FIFO
3454  * This function creates FIFO and initializes it.
3455  */
3456 static enum vxge_hw_status
3457 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
3458 		      struct vxge_hw_fifo_attr *attr)
3459 {
3460 	enum vxge_hw_status status = VXGE_HW_OK;
3461 	struct __vxge_hw_fifo *fifo;
3462 	struct vxge_hw_fifo_config *config;
3463 	u32 txdl_size, txdl_per_memblock;
3464 	struct vxge_hw_mempool_cbs fifo_mp_callback;
3465 	struct __vxge_hw_virtualpath *vpath;
3466 
3467 	if ((vp == NULL) || (attr == NULL)) {
3468 		status = VXGE_HW_ERR_INVALID_HANDLE;
3469 		goto exit;
3470 	}
3471 	vpath = vp->vpath;
3472 	config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
3473 
3474 	txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
3475 
3476 	txdl_per_memblock = config->memblock_size / txdl_size;
3477 
3478 	fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
3479 					VXGE_HW_CHANNEL_TYPE_FIFO,
3480 					config->fifo_blocks * txdl_per_memblock,
3481 					attr->per_txdl_space, attr->userdata);
3482 
3483 	if (fifo == NULL) {
3484 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
3485 		goto exit;
3486 	}
3487 
3488 	vpath->fifoh = fifo;
3489 	fifo->nofl_db = vpath->nofl_db;
3490 
3491 	fifo->vp_id = vpath->vp_id;
3492 	fifo->vp_reg = vpath->vp_reg;
3493 	fifo->stats = &vpath->sw_stats->fifo_stats;
3494 
3495 	fifo->config = config;
3496 
3497 	/* apply "interrupts per txdl" attribute */
3498 	fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
3499 	fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
3500 	fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
3501 
3502 	if (fifo->config->intr)
3503 		fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
3504 
3505 	fifo->no_snoop_bits = config->no_snoop_bits;
3506 
3507 	/*
3508 	 * FIFO memory management strategy:
3509 	 *
3510 	 * TxDL split into three independent parts:
3511 	 *	- set of TxD's
3512 	 *	- TxD HW private part
3513 	 *	- driver private part
3514 	 *
3515 	 * Adaptive memory allocation is used, i.e. memory is allocated on
3516 	 * demand, sized so that it fits into one memory block.
3517 	 * One memory block may contain more than one TxDL.
3518 	 *
3519 	 * During "reserve" operations more memory can be allocated on demand
3520 	 * for example due to FIFO full condition.
3521 	 *
3522 	 * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
3523 	 * routine which will essentially stop the channel and free resources.
3524 	 */
3525 
3526 	/* TxDL common private size == TxDL private  +  driver private */
3527 	fifo->priv_size =
3528 		sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
3529 	fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
3530 			VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
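	/*
	 * Illustrative sizing sketch (hypothetical numbers, assuming a
	 * 32-byte TxD, i.e. four u64 words): with max_frags = 16 and an
	 * 8192-byte memblock, txdl_size = 16 * 32 = 512 and
	 * txdl_per_memblock = 8192 / 512 = 16.  With per_txdl_space = 0 and
	 * a 64-byte cache line, the priv_size computed above rounds
	 * sizeof(struct __vxge_hw_fifo_txdl_priv) up to the next multiple
	 * of 64.
	 */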
3531 
3532 	fifo->per_txdl_space = attr->per_txdl_space;
3533 
3534 	/* recompute txdl size to be cacheline aligned */
3535 	fifo->txdl_size = txdl_size;
3536 	fifo->txdl_per_memblock = txdl_per_memblock;
3537 
3538 	fifo->txdl_term = attr->txdl_term;
3539 	fifo->callback = attr->callback;
3540 
3541 	if (fifo->txdl_per_memblock == 0) {
3542 		__vxge_hw_fifo_delete(vp);
3543 		status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
3544 		goto exit;
3545 	}
3546 
3547 	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
3548 
3549 	fifo->mempool =
3550 		__vxge_hw_mempool_create(vpath->hldev,
3551 			fifo->config->memblock_size,
3552 			fifo->txdl_size,
3553 			fifo->priv_size,
3554 			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
3555 			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
3556 			&fifo_mp_callback,
3557 			fifo);
3558 
3559 	if (fifo->mempool == NULL) {
3560 		__vxge_hw_fifo_delete(vp);
3561 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
3562 		goto exit;
3563 	}
3564 
3565 	status = __vxge_hw_channel_initialize(&fifo->channel);
3566 	if (status != VXGE_HW_OK) {
3567 		__vxge_hw_fifo_delete(vp);
3568 		goto exit;
3569 	}
3570 
3571 	vxge_assert(fifo->channel.reserve_ptr);
3572 exit:
3573 	return status;
3574 }
3575 
3576 /*
3577  * __vxge_hw_vpath_pci_read - Read the content of a given address
3578  *                          in pci config space.
3579  * Read from the vpath pci config space.
3580  */
3581 static enum vxge_hw_status
3582 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
3583 			 u32 phy_func_0, u32 offset, u32 *val)
3584 {
3585 	u64 val64;
3586 	enum vxge_hw_status status = VXGE_HW_OK;
3587 	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
3588 
3589 	val64 =	VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
3590 
3591 	if (phy_func_0)
3592 		val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
3593 
3594 	writeq(val64, &vp_reg->pci_config_access_cfg1);
3595 	wmb();
3596 	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
3597 			&vp_reg->pci_config_access_cfg2);
3598 	wmb();
3599 
3600 	status = __vxge_hw_device_register_poll(
3601 			&vp_reg->pci_config_access_cfg2,
3602 			VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3603 
3604 	if (status != VXGE_HW_OK)
3605 		goto exit;
3606 
3607 	val64 = readq(&vp_reg->pci_config_access_status);
3608 
3609 	if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
3610 		status = VXGE_HW_FAIL;
3611 		*val = 0;
3612 	} else
3613 		*val = (u32)vxge_bVALn(val64, 32, 32);
3614 exit:
3615 	return status;
3616 }
3617 
3618 /**
3619  * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3620  * @hldev: HW device.
3621  * @on_off: TRUE to turn flickering on, FALSE to turn it off
3622  *
3623  * Flicker the link LED.
3624  */
3625 enum vxge_hw_status
3626 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
3627 {
3628 	struct __vxge_hw_virtualpath *vpath;
3629 	u64 data0, data1 = 0, steer_ctrl = 0;
3630 	enum vxge_hw_status status;
3631 
3632 	if (hldev == NULL) {
3633 		status = VXGE_HW_ERR_INVALID_DEVICE;
3634 		goto exit;
3635 	}
3636 
3637 	vpath = &hldev->virtual_paths[hldev->first_vp_id];
3638 
3639 	data0 = on_off;
3640 	status = vxge_hw_vpath_fw_api(vpath,
3641 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
3642 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
3643 			0, &data0, &data1, &steer_ctrl);
3644 exit:
3645 	return status;
3646 }
3647 
3648 /*
3649  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3650  */
3651 enum vxge_hw_status
3652 __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3653 			      u32 action, u32 rts_table, u32 offset,
3654 			      u64 *data0, u64 *data1)
3655 {
3656 	enum vxge_hw_status status;
3657 	u64 steer_ctrl = 0;
3658 
3659 	if (vp == NULL) {
3660 		status = VXGE_HW_ERR_INVALID_HANDLE;
3661 		goto exit;
3662 	}
3663 
3664 	if ((rts_table ==
3665 	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3666 	    (rts_table ==
3667 	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3668 	    (rts_table ==
3669 	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3670 	    (rts_table ==
3671 	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3672 		steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3673 	}
3674 
3675 	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3676 				      data0, data1, &steer_ctrl);
3677 	if (status != VXGE_HW_OK)
3678 		goto exit;
3679 
3680 	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3681 	    (rts_table !=
3682 	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3683 		*data1 = 0;
3684 exit:
3685 	return status;
3686 }
3687 
3688 /*
3689  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3690  */
3691 enum vxge_hw_status
3692 __vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
3693 			      u32 rts_table, u32 offset, u64 steer_data0,
3694 			      u64 steer_data1)
3695 {
3696 	u64 data0, data1 = 0, steer_ctrl = 0;
3697 	enum vxge_hw_status status;
3698 
3699 	if (vp == NULL) {
3700 		status = VXGE_HW_ERR_INVALID_HANDLE;
3701 		goto exit;
3702 	}
3703 
3704 	data0 = steer_data0;
3705 
3706 	if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3707 	    (rts_table ==
3708 	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3709 		data1 = steer_data1;
3710 
3711 	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3712 				      &data0, &data1, &steer_ctrl);
3713 exit:
3714 	return status;
3715 }
3716 
3717 /*
3718  * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
3719  */
3720 enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3721 			struct __vxge_hw_vpath_handle *vp,
3722 			enum vxge_hw_rth_algoritms algorithm,
3723 			struct vxge_hw_rth_hash_types *hash_type,
3724 			u16 bucket_size)
3725 {
3726 	u64 data0, data1;
3727 	enum vxge_hw_status status = VXGE_HW_OK;
3728 
3729 	if (vp == NULL) {
3730 		status = VXGE_HW_ERR_INVALID_HANDLE;
3731 		goto exit;
3732 	}
3733 
3734 	status = __vxge_hw_vpath_rts_table_get(vp,
3735 		     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3736 		     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3737 			0, &data0, &data1);
3738 	if (status != VXGE_HW_OK)
3739 		goto exit;
3740 
3741 	data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3742 			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3743 
3744 	data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3745 	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3746 	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3747 
3748 	if (hash_type->hash_type_tcpipv4_en)
3749 		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3750 
3751 	if (hash_type->hash_type_ipv4_en)
3752 		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3753 
3754 	if (hash_type->hash_type_tcpipv6_en)
3755 		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3756 
3757 	if (hash_type->hash_type_ipv6_en)
3758 		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3759 
3760 	if (hash_type->hash_type_tcpipv6ex_en)
3761 		data0 |=
3762 		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3763 
3764 	if (hash_type->hash_type_ipv6ex_en)
3765 		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3766 
3767 	if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3768 		data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3769 	else
3770 		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3771 
3772 	status = __vxge_hw_vpath_rts_table_set(vp,
3773 		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3774 		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3775 		0, data0, 0);
3776 exit:
3777 	return status;
3778 }
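
/*
 * Minimal usage sketch for vxge_hw_vpath_rts_rth_set (illustrative only;
 * vp_handle and the chosen algorithm/bucket size are hypothetical):
 *
 *	struct vxge_hw_rth_hash_types hash_type = { 0 };
 *
 *	hash_type.hash_type_tcpipv4_en = 1;
 *	hash_type.hash_type_ipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp_handle, RTH_ALG_JENKINS,
 *					   &hash_type, 8);
 *
 * Note that the RTH_GEN_ACTIVE_TABLE bit is toggled on every call, so the
 * newly written generation config becomes the active one.
 */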
3779 
3780 static void
3781 vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3782 				u16 flag, u8 *itable)
3783 {
3784 	switch (flag) {
3785 	case 1:
3786 		*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3787 			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3788 			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3789 			itable[j]);
		break;
3790 	case 2:
3791 		*data0 |=
3792 			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3793 			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3794 			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3795 			itable[j]);
		break;
3796 	case 3:
3797 		*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3798 			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3799 			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3800 			itable[j]);
		break;
3801 	case 4:
3802 		*data1 |=
3803 			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3804 			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3805 			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3806 			itable[j]);
		break;
3807 	default:
3808 		return;
3809 	}
3810 }
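
/*
 * Packing sketch for the helper above (ITEMn() below is shorthand for the
 * BUCKET_NUM/ENTRY_EN/BUCKET_DATA macro triplets, not a real macro in this
 * driver): flags 1..4 fill, in order, ITEM0 of data0, ITEM1 of data0,
 * ITEM0 of data1 and ITEM1 of data1, so one RTS write can carry up to four
 * indirection-table entries, e.g. for buckets j0..j3 owned by one vpath:
 *
 *	data0 = ITEM0(j0, itable[j0]) | ITEM1(j1, itable[j1]);
 *	data1 = ITEM0(j2, itable[j2]) | ITEM1(j3, itable[j3]);
 */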
3811 /*
3812  * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3813  */
3814 enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3815 			struct __vxge_hw_vpath_handle **vpath_handles,
3816 			u32 vpath_count,
3817 			u8 *mtable,
3818 			u8 *itable,
3819 			u32 itable_size)
3820 {
3821 	u32 i, j, action, rts_table;
3822 	u64 data0;
3823 	u64 data1;
3824 	u32 max_entries;
3825 	enum vxge_hw_status status = VXGE_HW_OK;
3826 	struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3827 
3828 	if (vp == NULL) {
3829 		status = VXGE_HW_ERR_INVALID_HANDLE;
3830 		goto exit;
3831 	}
3832 
3833 	max_entries = (((u32)1) << itable_size);
3834 
3835 	if (vp->vpath->hldev->config.rth_it_type
3836 				== VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3837 		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3838 		rts_table =
3839 			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3840 
3841 		for (j = 0; j < max_entries; j++) {
3842 
3843 			data1 = 0;
3844 
3845 			data0 =
3846 			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3847 				itable[j]);
3848 
3849 			status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3850 				action, rts_table, j, data0, data1);
3851 
3852 			if (status != VXGE_HW_OK)
3853 				goto exit;
3854 		}
3855 
3856 		for (j = 0; j < max_entries; j++) {
3857 
3858 			data1 = 0;
3859 
3860 			data0 =
3861 			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3862 			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3863 				itable[j]);
3864 
3865 			status = __vxge_hw_vpath_rts_table_set(
3866 				vpath_handles[mtable[itable[j]]], action,
3867 				rts_table, j, data0, data1);
3868 
3869 			if (status != VXGE_HW_OK)
3870 				goto exit;
3871 		}
3872 	} else {
3873 		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3874 		rts_table =
3875 			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3876 		for (i = 0; i < vpath_count; i++) {
3877 
3878 			for (j = 0; j < max_entries;) {
3879 
3880 				data0 = 0;
3881 				data1 = 0;
3882 
3883 				while (j < max_entries) {
3884 					if (mtable[itable[j]] != i) {
3885 						j++;
3886 						continue;
3887 					}
3888 					vxge_hw_rts_rth_data0_data1_get(j,
3889 						&data0, &data1, 1, itable);
3890 					j++;
3891 					break;
3892 				}
3893 
3894 				while (j < max_entries) {
3895 					if (mtable[itable[j]] != i) {
3896 						j++;
3897 						continue;
3898 					}
3899 					vxge_hw_rts_rth_data0_data1_get(j,
3900 						&data0, &data1, 2, itable);
3901 					j++;
3902 					break;
3903 				}
3904 
3905 				while (j < max_entries) {
3906 					if (mtable[itable[j]] != i) {
3907 						j++;
3908 						continue;
3909 					}
3910 					vxge_hw_rts_rth_data0_data1_get(j,
3911 						&data0, &data1, 3, itable);
3912 					j++;
3913 					break;
3914 				}
3915 
3916 				while (j < max_entries) {
3917 					if (mtable[itable[j]] != i) {
3918 						j++;
3919 						continue;
3920 					}
3921 					vxge_hw_rts_rth_data0_data1_get(j,
3922 						&data0, &data1, 4, itable);
3923 					j++;
3924 					break;
3925 				}
3926 
3927 				if (data0 != 0) {
3928 					status = __vxge_hw_vpath_rts_table_set(
3929 							vpath_handles[i],
3930 							action, rts_table,
3931 							0, data0, data1);
3932 
3933 					if (status != VXGE_HW_OK)
3934 						goto exit;
3935 				}
3936 			}
3937 		}
3938 	}
3939 exit:
3940 	return status;
3941 }
3942 
3943 /**
3944  * vxge_hw_vpath_check_leak - Check for memory leak
3945  * @ring: Handle to the ring object used for receive
3946  *
3947  * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger than or equal to
3948  * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3949  * Returns: VXGE_HW_FAIL, if leak has occurred.
3950  *
3951  */
3952 enum vxge_hw_status
3953 vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3954 {
3955 	enum vxge_hw_status status = VXGE_HW_OK;
3956 	u64 rxd_new_count, rxd_spat;
3957 
3958 	if (ring == NULL)
3959 		return status;
3960 
3961 	rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3962 	rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3963 	rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3964 
3965 	if (rxd_new_count >= rxd_spat)
3966 		status = VXGE_HW_FAIL;
3967 
3968 	return status;
3969 }
3970 
3971 /*
3972  * __vxge_hw_vpath_mgmt_read
3973  * This routine reads the vpath_mgmt registers
3974  */
3975 static enum vxge_hw_status
3976 __vxge_hw_vpath_mgmt_read(
3977 	struct __vxge_hw_device *hldev,
3978 	struct __vxge_hw_virtualpath *vpath)
3979 {
3980 	u32 i, mtu = 0, max_pyld = 0;
3981 	u64 val64;
3982 	enum vxge_hw_status status = VXGE_HW_OK;
3983 
3984 	for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3985 
3986 		val64 = readq(&vpath->vpmgmt_reg->
3987 				rxmac_cfg0_port_vpmgmt_clone[i]);
3988 		max_pyld =
3989 			(u32)
3990 			VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3991 			(val64);
3992 		if (mtu < max_pyld)
3993 			mtu = max_pyld;
3994 	}
3995 
3996 	vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3997 
3998 	val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3999 
4000 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4001 		if (val64 & vxge_mBIT(i))
4002 			vpath->vsport_number = i;
4003 	}
4004 
4005 	val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
4006 
4007 	if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
4008 		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
4009 	else
4010 		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
4011 
4012 	return status;
4013 }
4014 
4015 /*
4016  * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
4017  * This routine checks the vpath_rst_in_prog register to see if
4018  * adapter completed the reset process for the vpath
4019  */
4020 static enum vxge_hw_status
4021 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
4022 {
4023 	enum vxge_hw_status status;
4024 
4025 	status = __vxge_hw_device_register_poll(
4026 			&vpath->hldev->common_reg->vpath_rst_in_prog,
4027 			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
4028 				1 << (16 - vpath->vp_id)),
4029 			vpath->hldev->config.device_poll_millis);
4030 
4031 	return status;
4032 }
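
/*
 * The per-vpath bit in VPATH_RST_IN_PROG is addressed as (16 - vp_id),
 * e.g. vp_id 0 polls bit 16 and vp_id 3 polls bit 13.  The same numbering
 * is used for the SW_RESET_VPATH and CLR_VPATH_RESET writes below.
 */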
4033 
4034 /*
4035  * __vxge_hw_vpath_reset
4036  * This routine resets the vpath on the device
4037  */
4038 static enum vxge_hw_status
4039 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4040 {
4041 	u64 val64;
4042 	enum vxge_hw_status status = VXGE_HW_OK;
4043 
4044 	val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
4045 
4046 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4047 				&hldev->common_reg->cmn_rsthdlr_cfg0);
4048 
4049 	return status;
4050 }
4051 
4052 /*
4053  * __vxge_hw_vpath_sw_reset
4054  * This routine resets the vpath structures
4055  */
4056 static enum vxge_hw_status
4057 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4058 {
4059 	enum vxge_hw_status status = VXGE_HW_OK;
4060 	struct __vxge_hw_virtualpath *vpath;
4061 
4062 	vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
4063 
4064 	if (vpath->ringh) {
4065 		status = __vxge_hw_ring_reset(vpath->ringh);
4066 		if (status != VXGE_HW_OK)
4067 			goto exit;
4068 	}
4069 
4070 	if (vpath->fifoh)
4071 		status = __vxge_hw_fifo_reset(vpath->fifoh);
4072 exit:
4073 	return status;
4074 }
4075 
4076 /*
4077  * __vxge_hw_vpath_prc_configure
4078  * This routine configures the prc registers of the virtual path using the
4079  * config passed
4080  */
4081 static void
4082 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4083 {
4084 	u64 val64;
4085 	struct __vxge_hw_virtualpath *vpath;
4086 	struct vxge_hw_vp_config *vp_config;
4087 	struct vxge_hw_vpath_reg __iomem *vp_reg;
4088 
4089 	vpath = &hldev->virtual_paths[vp_id];
4090 	vp_reg = vpath->vp_reg;
4091 	vp_config = vpath->vp_config;
4092 
4093 	if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
4094 		return;
4095 
4096 	val64 = readq(&vp_reg->prc_cfg1);
4097 	val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
4098 	writeq(val64, &vp_reg->prc_cfg1);
4099 
4100 	val64 = readq(&vpath->vp_reg->prc_cfg6);
4101 	val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
4102 	writeq(val64, &vpath->vp_reg->prc_cfg6);
4103 
4104 	val64 = readq(&vp_reg->prc_cfg7);
4105 
4106 	if (vpath->vp_config->ring.scatter_mode !=
4107 		VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
4108 
4109 		val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
4110 
4111 		switch (vpath->vp_config->ring.scatter_mode) {
4112 		case VXGE_HW_RING_SCATTER_MODE_A:
4113 			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4114 					VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
4115 			break;
4116 		case VXGE_HW_RING_SCATTER_MODE_B:
4117 			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4118 					VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
4119 			break;
4120 		case VXGE_HW_RING_SCATTER_MODE_C:
4121 			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4122 					VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
4123 			break;
4124 		}
4125 	}
4126 
4127 	writeq(val64, &vp_reg->prc_cfg7);
4128 
4129 	writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
4130 				__vxge_hw_ring_first_block_address_get(
4131 					vpath->ringh) >> 3), &vp_reg->prc_cfg5);
4132 
4133 	val64 = readq(&vp_reg->prc_cfg4);
4134 	val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
4135 	val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
4136 
4137 	val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
4138 			VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
4139 
4140 	if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
4141 		val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
4142 	else
4143 		val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
4144 
4145 	writeq(val64, &vp_reg->prc_cfg4);
4146 }
4147 
4148 /*
4149  * __vxge_hw_vpath_kdfc_configure
4150  * This routine configures the kdfc registers of the virtual path using the
4151  * config passed
4152  */
4153 static enum vxge_hw_status
4154 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4155 {
4156 	u64 val64;
4157 	u64 vpath_stride;
4158 	enum vxge_hw_status status = VXGE_HW_OK;
4159 	struct __vxge_hw_virtualpath *vpath;
4160 	struct vxge_hw_vpath_reg __iomem *vp_reg;
4161 
4162 	vpath = &hldev->virtual_paths[vp_id];
4163 	vp_reg = vpath->vp_reg;
4164 	status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
4165 
4166 	if (status != VXGE_HW_OK)
4167 		goto exit;
4168 
4169 	val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
4170 
4171 	vpath->max_kdfc_db =
4172 		(u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
4173 			val64+1)/2;
4174 
4175 	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4176 
4177 		vpath->max_nofl_db = vpath->max_kdfc_db;
4178 
4179 		if (vpath->max_nofl_db <
4180 			((vpath->vp_config->fifo.memblock_size /
4181 			(vpath->vp_config->fifo.max_frags *
4182 			sizeof(struct vxge_hw_fifo_txd))) *
4183 			vpath->vp_config->fifo.fifo_blocks)) {
4184 
4185 			return VXGE_HW_BADCFG_FIFO_BLOCKS;
4186 		}
4187 		val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
4188 				(vpath->max_nofl_db*2)-1);
4189 	}
4190 
4191 	writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
4192 
4193 	writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
4194 		&vp_reg->kdfc_fifo_trpl_ctrl);
4195 
4196 	val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
4197 
4198 	val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
4199 		   VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
4200 
4201 	val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
4202 		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
4203 #ifndef __BIG_ENDIAN
4204 		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
4205 #endif
4206 		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
4207 
4208 	writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
4209 	writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
4210 	wmb();
4211 	vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
4212 
4213 	vpath->nofl_db =
4214 		(struct __vxge_hw_non_offload_db_wrapper __iomem *)
4215 		(hldev->kdfc + (vp_id *
4216 		VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
4217 					vpath_stride)));
4218 exit:
4219 	return status;
4220 }
4221 
4222 /*
4223  * __vxge_hw_vpath_mac_configure
4224  * This routine configures the mac of the virtual path using the config passed
4225  */
4226 static enum vxge_hw_status
4227 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4228 {
4229 	u64 val64;
4230 	enum vxge_hw_status status = VXGE_HW_OK;
4231 	struct __vxge_hw_virtualpath *vpath;
4232 	struct vxge_hw_vp_config *vp_config;
4233 	struct vxge_hw_vpath_reg __iomem *vp_reg;
4234 
4235 	vpath = &hldev->virtual_paths[vp_id];
4236 	vp_reg = vpath->vp_reg;
4237 	vp_config = vpath->vp_config;
4238 
4239 	writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
4240 			vpath->vsport_number), &vp_reg->xmac_vsport_choice);
4241 
4242 	if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4243 
4244 		val64 = readq(&vp_reg->xmac_rpa_vcfg);
4245 
4246 		if (vp_config->rpa_strip_vlan_tag !=
4247 			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
4248 			if (vp_config->rpa_strip_vlan_tag)
4249 				val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
4250 			else
4251 				val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
4252 		}
4253 
4254 		writeq(val64, &vp_reg->xmac_rpa_vcfg);
4255 		val64 = readq(&vp_reg->rxmac_vcfg0);
4256 
4257 		if (vp_config->mtu !=
4258 				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
4259 			val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4260 			if ((vp_config->mtu  +
4261 				VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
4262 				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
4263 					vp_config->mtu  +
4264 					VXGE_HW_MAC_HEADER_MAX_SIZE);
4265 			else
4266 				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
4267 					vpath->max_mtu);
4268 		}
4269 
4270 		writeq(val64, &vp_reg->rxmac_vcfg0);
4271 
4272 		val64 = readq(&vp_reg->rxmac_vcfg1);
4273 
4274 		val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
4275 			VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
4276 
4277 		if (hldev->config.rth_it_type ==
4278 				VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
4279 			val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
4280 				0x2) |
4281 				VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
4282 		}
4283 
4284 		writeq(val64, &vp_reg->rxmac_vcfg1);
4285 	}
4286 	return status;
4287 }
4288 
4289 /*
4290  * __vxge_hw_vpath_tim_configure
4291  * This routine configures the tim registers of the virtual path using the
4292  * config passed
4293  */
4294 static enum vxge_hw_status
4295 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4296 {
4297 	u64 val64;
4298 	enum vxge_hw_status status = VXGE_HW_OK;
4299 	struct __vxge_hw_virtualpath *vpath;
4300 	struct vxge_hw_vpath_reg __iomem *vp_reg;
4301 	struct vxge_hw_vp_config *config;
4302 
4303 	vpath = &hldev->virtual_paths[vp_id];
4304 	vp_reg = vpath->vp_reg;
4305 	config = vpath->vp_config;
4306 
4307 	writeq(0, &vp_reg->tim_dest_addr);
4308 	writeq(0, &vp_reg->tim_vpath_map);
4309 	writeq(0, &vp_reg->tim_bitmap);
4310 	writeq(0, &vp_reg->tim_remap);
4311 
4312 	if (config->ring.enable == VXGE_HW_RING_ENABLE)
4313 		writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
4314 			(vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4315 			VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
4316 
4317 	val64 = readq(&vp_reg->tim_pci_cfg);
4318 	val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
4319 	writeq(val64, &vp_reg->tim_pci_cfg);
4320 
4321 	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4322 
4323 		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4324 
4325 		if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4326 			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4327 				0x3ffffff);
4328 			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4329 					config->tti.btimer_val);
4330 		}
4331 
4332 		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
4333 
4334 		if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
4335 			if (config->tti.timer_ac_en)
4336 				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4337 			else
4338 				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4339 		}
4340 
4341 		if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
4342 			if (config->tti.timer_ci_en)
4343 				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4344 			else
4345 				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4346 		}
4347 
4348 		if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4349 			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4350 			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
4351 					config->tti.urange_a);
4352 		}
4353 
4354 		if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
4355 			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
4356 			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
4357 					config->tti.urange_b);
4358 		}
4359 
4360 		if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
4361 			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
4362 			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
4363 					config->tti.urange_c);
4364 		}
4365 
4366 		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4367 		vpath->tim_tti_cfg1_saved = val64;
4368 
4369 		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4370 
4371 		if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4372 			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4373 			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4374 						config->tti.uec_a);
4375 		}
4376 
4377 		if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4378 			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4379 			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4380 						config->tti.uec_b);
4381 		}
4382 
4383 		if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4384 			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4385 			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4386 						config->tti.uec_c);
4387 		}
4388 
4389 		if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4390 			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4391 			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4392 						config->tti.uec_d);
4393 		}
4394 
4395 		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4396 		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4397 
4398 		if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4399 			if (config->tti.timer_ri_en)
4400 				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4401 			else
4402 				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4403 		}
4404 
4405 		if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4406 			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4407 					0x3ffffff);
4408 			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4409 					config->tti.rtimer_val);
4410 		}
4411 
4412 		if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4413 			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4414 			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
4415 		}
4416 
4417 		if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4418 			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4419 					0x3ffffff);
4420 			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4421 					config->tti.ltimer_val);
4422 		}
4423 
4424 		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4425 		vpath->tim_tti_cfg3_saved = val64;
4426 	}
4427 
4428 	if (config->ring.enable == VXGE_HW_RING_ENABLE) {
4429 
4430 		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4431 
4432 		if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4433 			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4434 					0x3ffffff);
4435 			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4436 					config->rti.btimer_val);
4437 		}
4438 
4439 		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
4440 
4441 		if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
4442 			if (config->rti.timer_ac_en)
4443 				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4444 			else
4445 				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4446 		}
4447 
4448 		if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
4449 			if (config->rti.timer_ci_en)
4450 				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4451 			else
4452 				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4453 		}
4454 
4455 		if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4456 			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4457 			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
4458 					config->rti.urange_a);
4459 		}
4460 
4461 		if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
4462 			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
4463 			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
4464 					config->rti.urange_b);
4465 		}
4466 
4467 		if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
4468 			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
4469 			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
4470 					config->rti.urange_c);
4471 		}
4472 
4473 		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4474 		vpath->tim_rti_cfg1_saved = val64;
4475 
4476 		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4477 
4478 		if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4479 			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4480 			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4481 						config->rti.uec_a);
4482 		}
4483 
4484 		if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4485 			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4486 			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4487 						config->rti.uec_b);
4488 		}
4489 
4490 		if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4491 			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4492 			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4493 						config->rti.uec_c);
4494 		}
4495 
4496 		if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4497 			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4498 			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4499 						config->rti.uec_d);
4500 		}
4501 
4502 		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4503 		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4504 
4505 		if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4506 			if (config->rti.timer_ri_en)
4507 				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4508 			else
4509 				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4510 		}
4511 
4512 		if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4513 			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4514 					0x3ffffff);
4515 			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4516 					config->rti.rtimer_val);
4517 		}
4518 
4519 		if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4520 			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4521 			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
4522 		}
4523 
4524 		if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4525 			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4526 					0x3ffffff);
4527 			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4528 					config->rti.ltimer_val);
4529 		}
4530 
4531 		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4532 		vpath->tim_rti_cfg3_saved = val64;
4533 	}
4534 
4535 	val64 = 0;
4536 	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4537 	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4538 	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4539 	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4540 	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4541 	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4542 
4543 	val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
4544 	val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
4545 	val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
4546 	writeq(val64, &vp_reg->tim_wrkld_clc);
4547 
4548 	return status;
4549 }
4550 
4551 /*
4552  * __vxge_hw_vpath_initialize
4553  * This routine is the final phase of init which initializes the
4554  * registers of the vpath using the configuration passed.
4555  */
4556 static enum vxge_hw_status
4557 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4558 {
4559 	u64 val64;
4560 	u32 val32;
4561 	enum vxge_hw_status status = VXGE_HW_OK;
4562 	struct __vxge_hw_virtualpath *vpath;
4563 	struct vxge_hw_vpath_reg __iomem *vp_reg;
4564 
4565 	vpath = &hldev->virtual_paths[vp_id];
4566 
4567 	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4568 		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4569 		goto exit;
4570 	}
4571 	vp_reg = vpath->vp_reg;
4572 
4573 	status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4574 	if (status != VXGE_HW_OK)
4575 		goto exit;
4576 
4577 	status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
4578 	if (status != VXGE_HW_OK)
4579 		goto exit;
4580 
4581 	status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4582 	if (status != VXGE_HW_OK)
4583 		goto exit;
4584 
4585 	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4586 	if (status != VXGE_HW_OK)
4587 		goto exit;
4588 
4589 	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4590 
4591 	/* Get MRRS value from device control */
4592 	status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4593 	if (status == VXGE_HW_OK) {
4594 		val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
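		/*
		 * DevCtl bits 14:12 encode Max_Read_Request_Size as
		 * 128 << val32 bytes; e.g. a DevCtl value of 0x2810 gives
		 * val32 = 2, i.e. a 512-byte MRRS, which is then programmed
		 * as the FB_FILL_THRESH value below (illustrative example).
		 */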
4595 		val64 &=
4596 		    ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4597 		val64 |=
4598 		    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4599 
4600 		val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4601 	}
4602 
4603 	val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4604 	val64 |=
4605 	    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4606 		    VXGE_HW_MAX_PAYLOAD_SIZE_512);
4607 
4608 	val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4609 	writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4610 
4611 exit:
4612 	return status;
4613 }
4614 
4615 /*
4616  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4617  * This routine closes all channels it opened and frees up memory
4618  */
4619 static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4620 {
4621 	struct __vxge_hw_virtualpath *vpath;
4622 
4623 	vpath = &hldev->virtual_paths[vp_id];
4624 
4625 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4626 		goto exit;
4627 
4628 	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4629 		vpath->hldev->tim_int_mask1, vpath->vp_id);
4630 	hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4631 
4632 	/* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
4633 	 * work after the interface is brought down.
4634 	 */
4635 	spin_lock(&vpath->lock);
4636 	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4637 	spin_unlock(&vpath->lock);
4638 
4639 	vpath->vpmgmt_reg = NULL;
4640 	vpath->nofl_db = NULL;
4641 	vpath->max_mtu = 0;
4642 	vpath->vsport_number = 0;
4643 	vpath->max_kdfc_db = 0;
4644 	vpath->max_nofl_db = 0;
4645 	vpath->ringh = NULL;
4646 	vpath->fifoh = NULL;
4647 	memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
4648 	vpath->stats_block = 0;
4649 	vpath->hw_stats = NULL;
4650 	vpath->hw_stats_sav = NULL;
4651 	vpath->sw_stats = NULL;
4652 
4653 exit:
4654 	return;
4655 }
4656 
4657 /*
4658  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4659  * This routine is the initial phase of init which resets the vpath and
4660  * initializes the software support structures.
4661  */
4662 static enum vxge_hw_status
4663 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4664 			struct vxge_hw_vp_config *config)
4665 {
4666 	struct __vxge_hw_virtualpath *vpath;
4667 	enum vxge_hw_status status = VXGE_HW_OK;
4668 
4669 	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4670 		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4671 		goto exit;
4672 	}
4673 
4674 	vpath = &hldev->virtual_paths[vp_id];
4675 
4676 	spin_lock_init(&vpath->lock);
4677 	vpath->vp_id = vp_id;
4678 	vpath->vp_open = VXGE_HW_VP_OPEN;
4679 	vpath->hldev = hldev;
4680 	vpath->vp_config = config;
4681 	vpath->vp_reg = hldev->vpath_reg[vp_id];
4682 	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4683 
4684 	__vxge_hw_vpath_reset(hldev, vp_id);
4685 
4686 	status = __vxge_hw_vpath_reset_check(vpath);
4687 	if (status != VXGE_HW_OK) {
4688 		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4689 		goto exit;
4690 	}
4691 
4692 	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4693 	if (status != VXGE_HW_OK) {
4694 		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4695 		goto exit;
4696 	}
4697 
4698 	INIT_LIST_HEAD(&vpath->vpath_handles);
4699 
4700 	vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4701 
4702 	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4703 		hldev->tim_int_mask1, vp_id);
4704 
4705 	status = __vxge_hw_vpath_initialize(hldev, vp_id);
4706 	if (status != VXGE_HW_OK)
4707 		__vxge_hw_vp_terminate(hldev, vp_id);
4708 exit:
4709 	return status;
4710 }
4711 
4712 /*
4713  * vxge_hw_vpath_mtu_set - Set MTU.
4714  * Set new MTU value. Example, to use jumbo frames:
4715  * vxge_hw_vpath_mtu_set(my_device, 9600);
4716  */
4717 enum vxge_hw_status
4718 vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4719 {
4720 	u64 val64;
4721 	enum vxge_hw_status status = VXGE_HW_OK;
4722 	struct __vxge_hw_virtualpath *vpath;
4723 
4724 	if (vp == NULL) {
4725 		status = VXGE_HW_ERR_INVALID_HANDLE;
4726 		goto exit;
4727 	}
4728 	vpath = vp->vpath;
4729 
4730 	new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4731 
4732 	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
4733 		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
		goto exit;
	}
4734 
4735 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4736 
4737 	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4738 	val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4739 
4740 	writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4741 
4742 	vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4743 
4744 exit:
4745 	return status;
4746 }
4747 
4748 /*
4749  * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4750  * Enable the DMA vpath statistics. The function is to be called to re-enable
4751  * the adapter to update stats into the host memory
4752  */
4753 static enum vxge_hw_status
4754 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4755 {
4756 	enum vxge_hw_status status = VXGE_HW_OK;
4757 	struct __vxge_hw_virtualpath *vpath;
4758 
4759 	vpath = vp->vpath;
4760 
4761 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4762 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4763 		goto exit;
4764 	}
4765 
4766 	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4767 			sizeof(struct vxge_hw_vpath_stats_hw_info));
4768 
4769 	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4770 exit:
4771 	return status;
4772 }
4773 
4774 /*
4775  * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
4776  * This function allocates a block from block pool or from the system
4777  */
4778 static struct __vxge_hw_blockpool_entry *
4779 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
4780 {
4781 	struct __vxge_hw_blockpool_entry *entry = NULL;
4782 	struct __vxge_hw_blockpool  *blockpool;
4783 
4784 	blockpool = &devh->block_pool;
4785 
4786 	if (size == blockpool->block_size) {
4787 
4788 		if (!list_empty(&blockpool->free_block_list))
4789 			entry = (struct __vxge_hw_blockpool_entry *)
4790 				list_first_entry(&blockpool->free_block_list,
4791 					struct __vxge_hw_blockpool_entry,
4792 					item);
4793 
4794 		if (entry != NULL) {
4795 			list_del(&entry->item);
4796 			blockpool->pool_size--;
4797 		}
4798 	}
4799 
4800 	if (entry != NULL)
4801 		__vxge_hw_blockpool_blocks_add(blockpool);
4802 
4803 	return entry;
4804 }
4805 
4806 /*
4807  * vxge_hw_vpath_open - Open a virtual path on a given adapter
4808  * This function is used to open access to a virtual path of an
4809  * adapter for offload and GRO operations. This function returns
4810  * synchronously.
4811  */
4812 enum vxge_hw_status
4813 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4814 		   struct vxge_hw_vpath_attr *attr,
4815 		   struct __vxge_hw_vpath_handle **vpath_handle)
4816 {
4817 	struct __vxge_hw_virtualpath *vpath;
4818 	struct __vxge_hw_vpath_handle *vp;
4819 	enum vxge_hw_status status;
4820 
4821 	vpath = &hldev->virtual_paths[attr->vp_id];
4822 
4823 	if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4824 		status = VXGE_HW_ERR_INVALID_STATE;
4825 		goto vpath_open_exit1;
4826 	}
4827 
4828 	status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4829 			&hldev->config.vp_config[attr->vp_id]);
4830 	if (status != VXGE_HW_OK)
4831 		goto vpath_open_exit1;
4832 
4833 	vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
4834 	if (vp == NULL) {
4835 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
4836 		goto vpath_open_exit2;
4837 	}
4838 
4839 	vp->vpath = vpath;
4840 
4841 	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4842 		status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4843 		if (status != VXGE_HW_OK)
4844 			goto vpath_open_exit6;
4845 	}
4846 
4847 	if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4848 		status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4849 		if (status != VXGE_HW_OK)
4850 			goto vpath_open_exit7;
4851 
4852 		__vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4853 	}
4854 
4855 	vpath->fifoh->tx_intr_num =
4856 		(attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
4857 			VXGE_HW_VPATH_INTR_TX;
4858 
4859 	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4860 				VXGE_HW_BLOCK_SIZE);
4861 	if (vpath->stats_block == NULL) {
4862 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
4863 		goto vpath_open_exit8;
4864 	}
4865 
4866 	vpath->hw_stats = vpath->stats_block->memblock;
4867 	memset(vpath->hw_stats, 0,
4868 		sizeof(struct vxge_hw_vpath_stats_hw_info));
4869 
4870 	hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4871 						vpath->hw_stats;
4872 
4873 	vpath->hw_stats_sav =
4874 		&hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4875 	memset(vpath->hw_stats_sav, 0,
4876 			sizeof(struct vxge_hw_vpath_stats_hw_info));
4877 
4878 	writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4879 
4880 	status = vxge_hw_vpath_stats_enable(vp);
4881 	if (status != VXGE_HW_OK)
4882 		goto vpath_open_exit8;
4883 
4884 	list_add(&vp->item, &vpath->vpath_handles);
4885 
4886 	hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4887 
4888 	*vpath_handle = vp;
4889 
4890 	attr->fifo_attr.userdata = vpath->fifoh;
4891 	attr->ring_attr.userdata = vpath->ringh;
4892 
4893 	return VXGE_HW_OK;
4894 
4895 vpath_open_exit8:
4896 	if (vpath->ringh != NULL)
4897 		__vxge_hw_ring_delete(vp);
4898 vpath_open_exit7:
4899 	if (vpath->fifoh != NULL)
4900 		__vxge_hw_fifo_delete(vp);
4901 vpath_open_exit6:
4902 	vfree(vp);
4903 vpath_open_exit2:
4904 	__vxge_hw_vp_terminate(hldev, attr->vp_id);
4905 vpath_open_exit1:
4906 
4907 	return status;
4908 }
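
/*
 * Minimal open/close sketch (illustrative only; the callbacks and the
 * vp_id shown are hypothetical and error handling is omitted):
 *
 *	struct vxge_hw_vpath_attr attr = { 0 };
 *	struct __vxge_hw_vpath_handle *vp;
 *
 *	attr.vp_id = hldev->first_vp_id;
 *	attr.fifo_attr.callback = my_xmit_compl;
 *	attr.ring_attr.callback = my_rx_compl;
 *	status = vxge_hw_vpath_open(hldev, &attr, &vp);
 *	...
 *	status = vxge_hw_vpath_close(vp);
 *
 * On success the fifo and ring handles are returned through
 * attr.fifo_attr.userdata and attr.ring_attr.userdata.
 */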
4909 
4910 /**
4911  * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell count
4912  * @vp: Handle got from previous vpath open
4913  *
4914  * This function posts the count of initially available RxD qwords to the
4915  * vpath's PRC_RXD_DOORBELL register and trims the ring's rxds_limit to
4916  * fit the on-chip RxD memory.
4917  */
4918 void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4919 {
4920 	struct __vxge_hw_virtualpath *vpath = vp->vpath;
4921 	struct __vxge_hw_ring *ring = vpath->ringh;
4922 	struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
4923 	u64 new_count, val64, val164;
4924 
4925 	if (vdev->titan1) {
4926 		new_count = readq(&vpath->vp_reg->rxdmem_size);
4927 		new_count &= 0x1fff;
4928 	} else
4929 		new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
4930 
4931 	val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
4932 
4933 	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4934 		&vpath->vp_reg->prc_rxd_doorbell);
4935 	readl(&vpath->vp_reg->prc_rxd_doorbell);
4936 
4937 	val164 /= 2;
4938 	val64 = readq(&vpath->vp_reg->prc_cfg6);
4939 	val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4940 	val64 &= 0x1ff;
4941 
4942 	/*
4943 	 * Each RxD is of 4 qwords
4944 	 */
4945 	new_count -= (val64 + 1);
4946 	val64 = min(val164, new_count) / 4;
4947 
4948 	ring->rxds_limit = min(ring->rxds_limit, val64);
4949 	if (ring->rxds_limit < 4)
4950 		ring->rxds_limit = 4;
4951 }
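
/*
 * Sketch of the limit computation above, treating val164 as a plain qword
 * count (the bit-field macros are glossed over; numbers are hypothetical):
 * with 4096 RxD qwords and RXD_SPAT = 255, val164 halves to 2048,
 * new_count becomes 4096 - 256 = 3840, and min(2048, 3840) / 4 = 512 RxDs
 * (each RxD is four qwords) is the cap applied to ring->rxds_limit.
 */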
4952 
4953 /*
4954  * __vxge_hw_blockpool_block_free - Frees a block from block pool
4955  * @devh: Hal device
4956  * @entry: Entry of block to be freed
4957  *
4958  * This function frees a block from block pool
4959  */
4960 static void
4961 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
4962 			       struct __vxge_hw_blockpool_entry *entry)
4963 {
4964 	struct __vxge_hw_blockpool  *blockpool;
4965 
4966 	blockpool = &devh->block_pool;
4967 
4968 	if (entry->length == blockpool->block_size) {
4969 		list_add(&entry->item, &blockpool->free_block_list);
4970 		blockpool->pool_size++;
4971 	}
4972 
4973 	__vxge_hw_blockpool_blocks_remove(blockpool);
4974 }
4975 
4976 /*
4977  * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4978  * This function is used to close access to a virtual path opened
4979  * earlier.
4980  */
4981 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4982 {
4983 	struct __vxge_hw_virtualpath *vpath = NULL;
4984 	struct __vxge_hw_device *devh = NULL;
4985 	u32 vp_id = vp->vpath->vp_id;
4986 	u32 is_empty = TRUE;
4987 	enum vxge_hw_status status = VXGE_HW_OK;
4988 
4989 	vpath = vp->vpath;
4990 	devh = vpath->hldev;
4991 
4992 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4993 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4994 		goto vpath_close_exit;
4995 	}
4996 
4997 	list_del(&vp->item);
4998 
4999 	if (!list_empty(&vpath->vpath_handles)) {
5000 		list_add(&vp->item, &vpath->vpath_handles);
5001 		is_empty = FALSE;
5002 	}
5003 
5004 	if (!is_empty) {
5005 		status = VXGE_HW_FAIL;
5006 		goto vpath_close_exit;
5007 	}
5008 
5009 	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
5010 
5011 	if (vpath->ringh != NULL)
5012 		__vxge_hw_ring_delete(vp);
5013 
5014 	if (vpath->fifoh != NULL)
5015 		__vxge_hw_fifo_delete(vp);
5016 
5017 	if (vpath->stats_block != NULL)
5018 		__vxge_hw_blockpool_block_free(devh, vpath->stats_block);
5019 
5020 	vfree(vp);
5021 
5022 	__vxge_hw_vp_terminate(devh, vp_id);
5023 
5024 vpath_close_exit:
5025 	return status;
5026 }
5027 
5028 /*
5029  * vxge_hw_vpath_reset - Resets vpath
5030  * This function is used to request a reset of the vpath
5031  */
5032 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
5033 {
5034 	enum vxge_hw_status status;
5035 	u32 vp_id;
5036 	struct __vxge_hw_virtualpath *vpath = vp->vpath;
5037 
5038 	vp_id = vpath->vp_id;
5039 
5040 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5041 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
5042 		goto exit;
5043 	}
5044 
5045 	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
5046 	if (status == VXGE_HW_OK)
5047 		vpath->sw_stats->soft_reset_cnt++;
5048 exit:
5049 	return status;
5050 }
5051 
5052 /*
5053  * vxge_hw_vpath_recover_from_reset - Poll for reset completion and re-initialize.
5054  * This function polls for the vpath reset completion and re-initializes
5055  * the vpath.
5056  */
5057 enum vxge_hw_status
5058 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
5059 {
5060 	struct __vxge_hw_virtualpath *vpath = NULL;
5061 	enum vxge_hw_status status;
5062 	struct __vxge_hw_device *hldev;
5063 	u32 vp_id;
5064 
5065 	vp_id = vp->vpath->vp_id;
5066 	vpath = vp->vpath;
5067 	hldev = vpath->hldev;
5068 
5069 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5070 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
5071 		goto exit;
5072 	}
5073 
5074 	status = __vxge_hw_vpath_reset_check(vpath);
5075 	if (status != VXGE_HW_OK)
5076 		goto exit;
5077 
5078 	status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
5079 	if (status != VXGE_HW_OK)
5080 		goto exit;
5081 
5082 	status = __vxge_hw_vpath_initialize(hldev, vp_id);
5083 	if (status != VXGE_HW_OK)
5084 		goto exit;
5085 
5086 	if (vpath->ringh != NULL)
5087 		__vxge_hw_vpath_prc_configure(hldev, vp_id);
5088 
5089 	memset(vpath->hw_stats, 0,
5090 		sizeof(struct vxge_hw_vpath_stats_hw_info));
5091 
5092 	memset(vpath->hw_stats_sav, 0,
5093 		sizeof(struct vxge_hw_vpath_stats_hw_info));
5094 
5095 	writeq(vpath->stats_block->dma_addr,
5096 		&vpath->vp_reg->stats_cfg);
5097 
5098 	status = vxge_hw_vpath_stats_enable(vp);
5099 
5100 exit:
5101 	return status;
5102 }
5103 
5104 /*
5105  * vxge_hw_vpath_enable - Enable vpath.
5106  * This routine clears the vpath reset thereby enabling a vpath
5107  * to start forwarding frames and generating interrupts.
5108  */
5109 void
5110 vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
5111 {
5112 	struct __vxge_hw_device *hldev;
5113 	u64 val64;
5114 
5115 	hldev = vp->vpath->hldev;
5116 
5117 	val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
5118 		1 << (16 - vp->vpath->vp_id));
5119 
5120 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
5121 		&hldev->common_reg->cmn_rsthdlr_cfg1);
5122 }
5123