1 /*
2  *  linux/drivers/net/ehea/ehea_main.c
3  *
4  *  eHEA ethernet device driver for IBM eServer System p
5  *
6  *  (C) Copyright IBM Corp. 2006
7  *
8  *  Authors:
9  *	 Christoph Raisch <raisch@de.ibm.com>
10  *	 Jan-Bernd Themann <themann@de.ibm.com>
11  *	 Thomas Klein <tklein@de.ibm.com>
12  *
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License as published by
16  * the Free Software Foundation; either version 2, or (at your option)
17  * any later version.
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
22  * GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with this program; if not, write to the Free Software
26  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27  */
28 
29 #include <linux/in.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/udp.h>
33 #include <linux/if.h>
34 #include <linux/list.h>
35 #include <linux/if_ether.h>
36 #include <linux/notifier.h>
37 #include <linux/reboot.h>
38 #include <linux/memory.h>
39 #include <asm/kexec.h>
40 #include <linux/mutex.h>
41 
42 #include <net/ip.h>
43 
44 #include "ehea.h"
45 #include "ehea_qmr.h"
46 #include "ehea_phyp.h"
47 
48 
49 MODULE_LICENSE("GPL");
50 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
51 MODULE_DESCRIPTION("IBM eServer HEA Driver");
52 MODULE_VERSION(DRV_VERSION);
53 
54 
55 static int msg_level = -1;
56 static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
57 static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
58 static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
59 static int sq_entries = EHEA_DEF_ENTRIES_SQ;
60 static int use_mcs;
61 static int use_lro;
62 static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
63 static int num_tx_qps = EHEA_NUM_TX_QP;
64 static int prop_carrier_state;
65 
66 module_param(msg_level, int, 0);
67 module_param(rq1_entries, int, 0);
68 module_param(rq2_entries, int, 0);
69 module_param(rq3_entries, int, 0);
70 module_param(sq_entries, int, 0);
71 module_param(prop_carrier_state, int, 0);
72 module_param(use_mcs, int, 0);
73 module_param(use_lro, int, 0);
74 module_param(lro_max_aggr, int, 0);
75 module_param(num_tx_qps, int, 0);
76 
77 MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
78 MODULE_PARM_DESC(msg_level, "msg_level");
79 MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
80 		 "port to stack. 1:yes, 0:no.  Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue  "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
93 MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
94 
95 MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
96 		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
97 MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
98 		 "Default = 0");
99 
100 static int port_name_cnt;
101 static LIST_HEAD(adapter_list);
102 static unsigned long ehea_driver_flags;
103 struct work_struct ehea_rereg_mr_task;
104 static DEFINE_MUTEX(dlpar_mem_lock);
105 struct ehea_fw_handle_array ehea_fw_handles;
106 struct ehea_bcmc_reg_array ehea_bcmc_regs;
107 
108 
109 static int __devinit ehea_probe_adapter(struct of_device *dev,
110 					const struct of_device_id *id);
111 
112 static int __devexit ehea_remove(struct of_device *dev);
113 
114 static struct of_device_id ehea_device_table[] = {
115 	{
116 		.name = "lhea",
117 		.compatible = "IBM,lhea",
118 	},
119 	{},
120 };
121 MODULE_DEVICE_TABLE(of, ehea_device_table);
122 
123 static struct of_platform_driver ehea_driver = {
124 	.name = "ehea",
125 	.match_table = ehea_device_table,
126 	.probe = ehea_probe_adapter,
127 	.remove = ehea_remove,
128 };
129 
130 void ehea_dump(void *adr, int len, char *msg)
131 {
132 	int x;
133 	unsigned char *deb = adr;
134 	for (x = 0; x < len; x += 16) {
135 		printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
136 			  deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
137 		deb += 16;
138 	}
139 }
140 
141 void ehea_schedule_port_reset(struct ehea_port *port)
142 {
143 	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
144 		schedule_work(&port->reset_task);
145 }
146 
147 static void ehea_update_firmware_handles(void)
148 {
149 	struct ehea_fw_handle_entry *arr = NULL;
150 	struct ehea_adapter *adapter;
151 	int num_adapters = 0;
152 	int num_ports = 0;
153 	int num_portres = 0;
154 	int i = 0;
155 	int num_fw_handles, k, l;
156 
157 	/* Determine number of handles */
158 	list_for_each_entry(adapter, &adapter_list, list) {
159 		num_adapters++;
160 
161 		for (k = 0; k < EHEA_MAX_PORTS; k++) {
162 			struct ehea_port *port = adapter->port[k];
163 
164 			if (!port || (port->state != EHEA_PORT_UP))
165 				continue;
166 
167 			num_ports++;
168 			num_portres += port->num_def_qps + port->num_add_tx_qps;
169 		}
170 	}
171 
172 	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
173 			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
174 			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
175 
176 	if (num_fw_handles) {
177 		arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
178 		if (!arr)
179 			return;  /* Keep the existing array */
180 	} else
181 		goto out_update;
182 
183 	list_for_each_entry(adapter, &adapter_list, list) {
184 		for (k = 0; k < EHEA_MAX_PORTS; k++) {
185 			struct ehea_port *port = adapter->port[k];
186 
187 			if (!port || (port->state != EHEA_PORT_UP))
188 				continue;
189 
190 			for (l = 0;
191 			     l < port->num_def_qps + port->num_add_tx_qps;
192 			     l++) {
193 				struct ehea_port_res *pr = &port->port_res[l];
194 
195 				arr[i].adh = adapter->handle;
196 				arr[i++].fwh = pr->qp->fw_handle;
197 				arr[i].adh = adapter->handle;
198 				arr[i++].fwh = pr->send_cq->fw_handle;
199 				arr[i].adh = adapter->handle;
200 				arr[i++].fwh = pr->recv_cq->fw_handle;
201 				arr[i].adh = adapter->handle;
202 				arr[i++].fwh = pr->eq->fw_handle;
203 				arr[i].adh = adapter->handle;
204 				arr[i++].fwh = pr->send_mr.handle;
205 				arr[i].adh = adapter->handle;
206 				arr[i++].fwh = pr->recv_mr.handle;
207 			}
208 			arr[i].adh = adapter->handle;
209 			arr[i++].fwh = port->qp_eq->fw_handle;
210 		}
211 
212 		arr[i].adh = adapter->handle;
213 		arr[i++].fwh = adapter->neq->fw_handle;
214 
215 		if (adapter->mr.handle) {
216 			arr[i].adh = adapter->handle;
217 			arr[i++].fwh = adapter->mr.handle;
218 		}
219 	}
220 
221 out_update:
222 	kfree(ehea_fw_handles.arr);
223 	ehea_fw_handles.arr = arr;
224 	ehea_fw_handles.num_entries = i;
225 }
226 
227 static void ehea_update_bcmc_registrations(void)
228 {
229 	struct ehea_bcmc_reg_entry *arr = NULL;
230 	struct ehea_adapter *adapter;
231 	struct ehea_mc_list *mc_entry;
232 	int num_registrations = 0;
233 	int i = 0;
234 	int k;
235 
236 	/* Determine number of registrations */
237 	list_for_each_entry(adapter, &adapter_list, list)
238 		for (k = 0; k < EHEA_MAX_PORTS; k++) {
239 			struct ehea_port *port = adapter->port[k];
240 
241 			if (!port || (port->state != EHEA_PORT_UP))
242 				continue;
243 
244 			num_registrations += 2;	/* Broadcast registrations */
245 
246 			list_for_each_entry(mc_entry, &port->mc_list->list,list)
247 				num_registrations += 2;
248 		}
249 
250 	if (num_registrations) {
251 		arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
252 		if (!arr)
253 			return;  /* Keep the existing array */
254 	} else
255 		goto out_update;
256 
257 	list_for_each_entry(adapter, &adapter_list, list) {
258 		for (k = 0; k < EHEA_MAX_PORTS; k++) {
259 			struct ehea_port *port = adapter->port[k];
260 
261 			if (!port || (port->state != EHEA_PORT_UP))
262 				continue;
263 
264 			arr[i].adh = adapter->handle;
265 			arr[i].port_id = port->logical_port_id;
266 			arr[i].reg_type = EHEA_BCMC_BROADCAST |
267 					  EHEA_BCMC_UNTAGGED;
268 			arr[i++].macaddr = port->mac_addr;
269 
270 			arr[i].adh = adapter->handle;
271 			arr[i].port_id = port->logical_port_id;
272 			arr[i].reg_type = EHEA_BCMC_BROADCAST |
273 					  EHEA_BCMC_VLANID_ALL;
274 			arr[i++].macaddr = port->mac_addr;
275 
276 			list_for_each_entry(mc_entry,
277 					    &port->mc_list->list, list) {
278 				arr[i].adh = adapter->handle;
279 				arr[i].port_id = port->logical_port_id;
280 				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
281 						  EHEA_BCMC_MULTICAST |
282 						  EHEA_BCMC_UNTAGGED;
283 				arr[i++].macaddr = mc_entry->macaddr;
284 
285 				arr[i].adh = adapter->handle;
286 				arr[i].port_id = port->logical_port_id;
287 				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
288 						  EHEA_BCMC_MULTICAST |
289 						  EHEA_BCMC_VLANID_ALL;
290 				arr[i++].macaddr = mc_entry->macaddr;
291 			}
292 		}
293 	}
294 
295 out_update:
296 	kfree(ehea_bcmc_regs.arr);
297 	ehea_bcmc_regs.arr = arr;
298 	ehea_bcmc_regs.num_entries = i;
299 }
300 
301 static struct net_device_stats *ehea_get_stats(struct net_device *dev)
302 {
303 	struct ehea_port *port = netdev_priv(dev);
304 	struct net_device_stats *stats = &port->stats;
305 	struct hcp_ehea_port_cb2 *cb2;
306 	u64 hret, rx_packets, tx_packets;
307 	int i;
308 
309 	memset(stats, 0, sizeof(*stats));
310 
311 	cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
312 	if (!cb2) {
313 		ehea_error("no mem for cb2");
314 		goto out;
315 	}
316 
317 	hret = ehea_h_query_ehea_port(port->adapter->handle,
318 				      port->logical_port_id,
319 				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
320 	if (hret != H_SUCCESS) {
321 		ehea_error("query_ehea_port failed");
322 		goto out_herr;
323 	}
324 
325 	if (netif_msg_hw(port))
326 		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
327 
328 	rx_packets = 0;
329 	for (i = 0; i < port->num_def_qps; i++)
330 		rx_packets += port->port_res[i].rx_packets;
331 
332 	tx_packets = 0;
333 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
334 		tx_packets += port->port_res[i].tx_packets;
335 
336 	stats->tx_packets = tx_packets;
337 	stats->multicast = cb2->rxmcp;
338 	stats->rx_errors = cb2->rxuerr;
339 	stats->rx_bytes = cb2->rxo;
340 	stats->tx_bytes = cb2->txo;
341 	stats->rx_packets = rx_packets;
342 
343 out_herr:
344 	kfree(cb2);
345 out:
346 	return stats;
347 }
348 
349 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
350 {
351 	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
352 	struct net_device *dev = pr->port->netdev;
353 	int max_index_mask = pr->rq1_skba.len - 1;
354 	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
355 	int adder = 0;
356 	int i;
357 
358 	pr->rq1_skba.os_skbs = 0;
359 
360 	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
361 		if (nr_of_wqes > 0)
362 			pr->rq1_skba.index = index;
363 		pr->rq1_skba.os_skbs = fill_wqes;
364 		return;
365 	}
366 
367 	for (i = 0; i < fill_wqes; i++) {
368 		if (!skb_arr_rq1[index]) {
369 			skb_arr_rq1[index] = netdev_alloc_skb(dev,
370 							      EHEA_L_PKT_SIZE);
371 			if (!skb_arr_rq1[index]) {
372 				pr->rq1_skba.os_skbs = fill_wqes - i;
373 				ehea_error("%s: no mem for skb/%d wqes filled",
374 					   dev->name, i);
375 				break;
376 			}
377 		}
378 		index--;
379 		index &= max_index_mask;
380 		adder++;
381 	}
382 
383 	if (adder == 0)
384 		return;
385 
386 	/* Ring doorbell */
387 	ehea_update_rq1a(pr->qp, adder);
388 }
389 
390 static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
391 {
392 	int ret = 0;
393 	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
394 	struct net_device *dev = pr->port->netdev;
395 	int i;
396 
397 	for (i = 0; i < pr->rq1_skba.len; i++) {
398 		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
399 		if (!skb_arr_rq1[i]) {
400 			ehea_error("%s: no mem for skb/%d wqes filled",
401 				   dev->name, i);
402 			ret = -ENOMEM;
403 			goto out;
404 		}
405 	}
406 	/* Ring doorbell */
407 	ehea_update_rq1a(pr->qp, nr_rq1a);
408 out:
409 	return ret;
410 }
411 
412 static int ehea_refill_rq_def(struct ehea_port_res *pr,
413 			      struct ehea_q_skb_arr *q_skba, int rq_nr,
414 			      int num_wqes, int wqe_type, int packet_size)
415 {
416 	struct net_device *dev = pr->port->netdev;
417 	struct ehea_qp *qp = pr->qp;
418 	struct sk_buff **skb_arr = q_skba->arr;
419 	struct ehea_rwqe *rwqe;
420 	int i, index, max_index_mask, fill_wqes;
421 	int adder = 0;
422 	int ret = 0;
423 
424 	fill_wqes = q_skba->os_skbs + num_wqes;
425 	q_skba->os_skbs = 0;
426 
427 	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
428 		q_skba->os_skbs = fill_wqes;
429 		return ret;
430 	}
431 
432 	index = q_skba->index;
433 	max_index_mask = q_skba->len - 1;
434 	for (i = 0; i < fill_wqes; i++) {
435 		u64 tmp_addr;
436 		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
437 		if (!skb) {
438 			ehea_error("%s: no mem for skb/%d wqes filled",
439 				   pr->port->netdev->name, i);
440 			q_skba->os_skbs = fill_wqes - i;
441 			ret = -ENOMEM;
442 			break;
443 		}
444 		skb_reserve(skb, NET_IP_ALIGN);
445 
446 		skb_arr[index] = skb;
447 		tmp_addr = ehea_map_vaddr(skb->data);
448 		if (tmp_addr == -1) {
449 			dev_kfree_skb(skb);
450 			q_skba->os_skbs = fill_wqes - i;
451 			ret = 0;
452 			break;
453 		}
454 
455 		rwqe = ehea_get_next_rwqe(qp, rq_nr);
456 		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
457 			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
458 		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
459 		rwqe->sg_list[0].vaddr = tmp_addr;
460 		rwqe->sg_list[0].len = packet_size;
461 		rwqe->data_segments = 1;
462 
463 		index++;
464 		index &= max_index_mask;
465 		adder++;
466 	}
467 
468 	q_skba->index = index;
469 	if (adder == 0)
470 		goto out;
471 
472 	/* Ring doorbell */
473 	iosync();
474 	if (rq_nr == 2)
475 		ehea_update_rq2a(pr->qp, adder);
476 	else
477 		ehea_update_rq3a(pr->qp, adder);
478 out:
479 	return ret;
480 }
481 
482 
483 static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
484 {
485 	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
486 				  nr_of_wqes, EHEA_RWQE2_TYPE,
487 				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
488 }
489 
490 
491 static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
492 {
493 	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
494 				  nr_of_wqes, EHEA_RWQE3_TYPE,
495 				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
496 }
497 
498 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
499 {
500 	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
501 	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
502 		return 0;
503 	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
504 	    (cqe->header_length == 0))
505 		return 0;
506 	return -EINVAL;
507 }
508 
509 static inline void ehea_fill_skb(struct net_device *dev,
510 				 struct sk_buff *skb, struct ehea_cqe *cqe)
511 {
512 	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */
513 
514 	skb_put(skb, length);
515 	skb->ip_summed = CHECKSUM_UNNECESSARY;
516 	skb->protocol = eth_type_trans(skb, dev);
517 }
518 
519 static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
520 					       int arr_len,
521 					       struct ehea_cqe *cqe)
522 {
523 	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
524 	struct sk_buff *skb;
525 	void *pref;
526 	int x;
527 
528 	x = skb_index + 1;
529 	x &= (arr_len - 1);
530 
531 	pref = skb_array[x];
532 	prefetchw(pref);
533 	prefetchw(pref + EHEA_CACHE_LINE);
534 
535 	pref = (skb_array[x]->data);
536 	prefetch(pref);
537 	prefetch(pref + EHEA_CACHE_LINE);
538 	prefetch(pref + EHEA_CACHE_LINE * 2);
539 	prefetch(pref + EHEA_CACHE_LINE * 3);
540 	skb = skb_array[skb_index];
541 	skb_array[skb_index] = NULL;
542 	return skb;
543 }
544 
545 static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
546 						  int arr_len, int wqe_index)
547 {
548 	struct sk_buff *skb;
549 	void *pref;
550 	int x;
551 
552 	x = wqe_index + 1;
553 	x &= (arr_len - 1);
554 
555 	pref = skb_array[x];
556 	prefetchw(pref);
557 	prefetchw(pref + EHEA_CACHE_LINE);
558 
559 	pref = (skb_array[x]->data);
560 	prefetchw(pref);
561 	prefetchw(pref + EHEA_CACHE_LINE);
562 
563 	skb = skb_array[wqe_index];
564 	skb_array[wqe_index] = NULL;
565 	return skb;
566 }
567 
568 static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
569 				 struct ehea_cqe *cqe, int *processed_rq2,
570 				 int *processed_rq3)
571 {
572 	struct sk_buff *skb;
573 
574 	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
575 		pr->p_stats.err_tcp_cksum++;
576 	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
577 		pr->p_stats.err_ip_cksum++;
578 	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
579 		pr->p_stats.err_frame_crc++;
580 
581 	if (rq == 2) {
582 		*processed_rq2 += 1;
583 		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
584 		dev_kfree_skb(skb);
585 	} else if (rq == 3) {
586 		*processed_rq3 += 1;
587 		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
588 		dev_kfree_skb(skb);
589 	}
590 
591 	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
592 		if (netif_msg_rx_err(pr->port)) {
593 			ehea_error("Critical receive error for QP %d. "
594 				   "Resetting port.", pr->qp->init_attr.qp_nr);
595 			ehea_dump(cqe, sizeof(*cqe), "CQE");
596 		}
597 		ehea_schedule_port_reset(pr->port);
598 		return 1;
599 	}
600 
601 	return 0;
602 }
603 
604 static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
605 		       void **tcph, u64 *hdr_flags, void *priv)
606 {
607 	struct ehea_cqe *cqe = priv;
608 	unsigned int ip_len;
609 	struct iphdr *iph;
610 
611 	/* non tcp/udp packets */
612 	if (!cqe->header_length)
613 		return -1;
614 
615 	/* non tcp packet */
616 	skb_reset_network_header(skb);
617 	iph = ip_hdr(skb);
618 	if (iph->protocol != IPPROTO_TCP)
619 		return -1;
620 
621 	ip_len = ip_hdrlen(skb);
622 	skb_set_transport_header(skb, ip_len);
623 	*tcph = tcp_hdr(skb);
624 
625 	/* check if ip header and tcp header are complete */
626 	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
627 		return -1;
628 
629 	*hdr_flags = LRO_IPV4 | LRO_TCP;
630 	*iphdr = iph;
631 
632 	return 0;
633 }
634 
635 static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
636 			  struct sk_buff *skb)
637 {
638 	int vlan_extracted = (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
639 		&& pr->port->vgrp;
640 
641 	if (use_lro) {
642 		if (vlan_extracted)
643 			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
644 						     pr->port->vgrp,
645 						     cqe->vlan_tag,
646 						     cqe);
647 		else
648 			lro_receive_skb(&pr->lro_mgr, skb, cqe);
649 	} else {
650 		if (vlan_extracted)
651 			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
652 						 cqe->vlan_tag);
653 		else
654 			netif_receive_skb(skb);
655 	}
656 }
657 
658 static int ehea_proc_rwqes(struct net_device *dev,
659 			   struct ehea_port_res *pr,
660 			   int budget)
661 {
662 	struct ehea_port *port = pr->port;
663 	struct ehea_qp *qp = pr->qp;
664 	struct ehea_cqe *cqe;
665 	struct sk_buff *skb;
666 	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
667 	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
668 	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
669 	int skb_arr_rq1_len = pr->rq1_skba.len;
670 	int skb_arr_rq2_len = pr->rq2_skba.len;
671 	int skb_arr_rq3_len = pr->rq3_skba.len;
672 	int processed, processed_rq1, processed_rq2, processed_rq3;
673 	int wqe_index, last_wqe_index, rq, port_reset;
674 
675 	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
676 	last_wqe_index = 0;
677 
678 	cqe = ehea_poll_rq1(qp, &wqe_index);
679 	while ((processed < budget) && cqe) {
680 		ehea_inc_rq1(qp);
681 		processed_rq1++;
682 		processed++;
683 		if (netif_msg_rx_status(port))
684 			ehea_dump(cqe, sizeof(*cqe), "CQE");
685 
686 		last_wqe_index = wqe_index;
687 		rmb();
688 		if (!ehea_check_cqe(cqe, &rq)) {
689 			if (rq == 1) {
690 				/* LL RQ1 */
691 				skb = get_skb_by_index_ll(skb_arr_rq1,
692 							  skb_arr_rq1_len,
693 							  wqe_index);
694 				if (unlikely(!skb)) {
695 					if (netif_msg_rx_err(port))
696 						ehea_error("LL rq1: skb=NULL");
697 
698 					skb = netdev_alloc_skb(dev,
699 							       EHEA_L_PKT_SIZE);
700 					if (!skb)
701 						break;
702 				}
703 				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
704 						 cqe->num_bytes_transfered - 4);
705 				ehea_fill_skb(dev, skb, cqe);
706 			} else if (rq == 2) {
707 				/* RQ2 */
708 				skb = get_skb_by_index(skb_arr_rq2,
709 						       skb_arr_rq2_len, cqe);
710 				if (unlikely(!skb)) {
711 					if (netif_msg_rx_err(port))
712 						ehea_error("rq2: skb=NULL");
713 					break;
714 				}
715 				ehea_fill_skb(dev, skb, cqe);
716 				processed_rq2++;
717 			} else {
718 				/* RQ3 */
719 				skb = get_skb_by_index(skb_arr_rq3,
720 						       skb_arr_rq3_len, cqe);
721 				if (unlikely(!skb)) {
722 					if (netif_msg_rx_err(port))
723 						ehea_error("rq3: skb=NULL");
724 					break;
725 				}
726 				ehea_fill_skb(dev, skb, cqe);
727 				processed_rq3++;
728 			}
729 
730 			ehea_proc_skb(pr, cqe, skb);
731 		} else {
732 			pr->p_stats.poll_receive_errors++;
733 			port_reset = ehea_treat_poll_error(pr, rq, cqe,
734 							   &processed_rq2,
735 							   &processed_rq3);
736 			if (port_reset)
737 				break;
738 		}
739 		cqe = ehea_poll_rq1(qp, &wqe_index);
740 	}
741 	if (use_lro)
742 		lro_flush_all(&pr->lro_mgr);
743 
744 	pr->rx_packets += processed;
745 
746 	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
747 	ehea_refill_rq2(pr, processed_rq2);
748 	ehea_refill_rq3(pr, processed_rq3);
749 
750 	return processed;
751 }
752 
753 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
754 {
755 	struct sk_buff *skb;
756 	struct ehea_cq *send_cq = pr->send_cq;
757 	struct ehea_cqe *cqe;
758 	int quota = my_quota;
759 	int cqe_counter = 0;
760 	int swqe_av = 0;
761 	int index;
762 	unsigned long flags;
763 
764 	cqe = ehea_poll_cq(send_cq);
765 	while (cqe && (quota > 0)) {
766 		ehea_inc_cq(send_cq);
767 
768 		cqe_counter++;
769 		rmb();
770 		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
771 			ehea_error("Send Completion Error: Resetting port");
772 			if (netif_msg_tx_err(pr->port))
773 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
774 			ehea_schedule_port_reset(pr->port);
775 			break;
776 		}
777 
778 		if (netif_msg_tx_done(pr->port))
779 			ehea_dump(cqe, sizeof(*cqe), "CQE");
780 
781 		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
782 			   == EHEA_SWQE2_TYPE)) {
783 
784 			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
785 			skb = pr->sq_skba.arr[index];
786 			dev_kfree_skb(skb);
787 			pr->sq_skba.arr[index] = NULL;
788 		}
789 
790 		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
791 		quota--;
792 
793 		cqe = ehea_poll_cq(send_cq);
794 	};
795 
796 	ehea_update_feca(send_cq, cqe_counter);
797 	atomic_add(swqe_av, &pr->swqe_avail);
798 
799 	spin_lock_irqsave(&pr->netif_queue, flags);
800 
801 	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
802 				  >= pr->swqe_refill_th)) {
803 		netif_wake_queue(pr->port->netdev);
804 		pr->queue_stopped = 0;
805 	}
806 	spin_unlock_irqrestore(&pr->netif_queue, flags);
807 
808 	return cqe;
809 }
810 
811 #define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
812 #define EHEA_POLL_MAX_CQES 65535
813 
814 static int ehea_poll(struct napi_struct *napi, int budget)
815 {
816 	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
817 						napi);
818 	struct net_device *dev = pr->port->netdev;
819 	struct ehea_cqe *cqe;
820 	struct ehea_cqe *cqe_skb = NULL;
821 	int force_irq, wqe_index;
822 	int rx = 0;
823 
824 	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
825 	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
826 
827 	if (!force_irq)
828 		rx += ehea_proc_rwqes(dev, pr, budget - rx);
829 
830 	while ((rx != budget) || force_irq) {
831 		pr->poll_counter = 0;
832 		force_irq = 0;
833 		netif_rx_complete(napi);
834 		ehea_reset_cq_ep(pr->recv_cq);
835 		ehea_reset_cq_ep(pr->send_cq);
836 		ehea_reset_cq_n1(pr->recv_cq);
837 		ehea_reset_cq_n1(pr->send_cq);
838 		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
839 		cqe_skb = ehea_poll_cq(pr->send_cq);
840 
841 		if (!cqe && !cqe_skb)
842 			return rx;
843 
844 		if (!netif_rx_reschedule(napi))
845 			return rx;
846 
847 		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
848 		rx += ehea_proc_rwqes(dev, pr, budget - rx);
849 	}
850 
851 	pr->poll_counter++;
852 	return rx;
853 }
854 
855 #ifdef CONFIG_NET_POLL_CONTROLLER
856 static void ehea_netpoll(struct net_device *dev)
857 {
858 	struct ehea_port *port = netdev_priv(dev);
859 	int i;
860 
861 	for (i = 0; i < port->num_def_qps; i++)
862 		netif_rx_schedule(&port->port_res[i].napi);
863 }
864 #endif
865 
866 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
867 {
868 	struct ehea_port_res *pr = param;
869 
870 	netif_rx_schedule(&pr->napi);
871 
872 	return IRQ_HANDLED;
873 }
874 
875 static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
876 {
877 	struct ehea_port *port = param;
878 	struct ehea_eqe *eqe;
879 	struct ehea_qp *qp;
880 	u32 qp_token;
881 
882 	eqe = ehea_poll_eq(port->qp_eq);
883 
884 	while (eqe) {
885 		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
886 		ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
887 			   eqe->entry, qp_token);
888 
889 		qp = port->port_res[qp_token].qp;
890 		ehea_error_data(port->adapter, qp->fw_handle);
891 		eqe = ehea_poll_eq(port->qp_eq);
892 	}
893 
894 	ehea_schedule_port_reset(port);
895 
896 	return IRQ_HANDLED;
897 }
898 
899 static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
900 				       int logical_port)
901 {
902 	int i;
903 
904 	for (i = 0; i < EHEA_MAX_PORTS; i++)
905 		if (adapter->port[i])
906 			if (adapter->port[i]->logical_port_id == logical_port)
907 				return adapter->port[i];
908 	return NULL;
909 }
910 
911 int ehea_sense_port_attr(struct ehea_port *port)
912 {
913 	int ret;
914 	u64 hret;
915 	struct hcp_ehea_port_cb0 *cb0;
916 
917 	/* may be called via ehea_neq_tasklet() */
918 	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
919 	if (!cb0) {
920 		ehea_error("no mem for cb0");
921 		ret = -ENOMEM;
922 		goto out;
923 	}
924 
925 	hret = ehea_h_query_ehea_port(port->adapter->handle,
926 				      port->logical_port_id, H_PORT_CB0,
927 				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
928 				      cb0);
929 	if (hret != H_SUCCESS) {
930 		ret = -EIO;
931 		goto out_free;
932 	}
933 
934 	/* MAC address */
935 	port->mac_addr = cb0->port_mac_addr << 16;
936 
937 	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
938 		ret = -EADDRNOTAVAIL;
939 		goto out_free;
940 	}
941 
942 	/* Port speed */
943 	switch (cb0->port_speed) {
944 	case H_SPEED_10M_H:
945 		port->port_speed = EHEA_SPEED_10M;
946 		port->full_duplex = 0;
947 		break;
948 	case H_SPEED_10M_F:
949 		port->port_speed = EHEA_SPEED_10M;
950 		port->full_duplex = 1;
951 		break;
952 	case H_SPEED_100M_H:
953 		port->port_speed = EHEA_SPEED_100M;
954 		port->full_duplex = 0;
955 		break;
956 	case H_SPEED_100M_F:
957 		port->port_speed = EHEA_SPEED_100M;
958 		port->full_duplex = 1;
959 		break;
960 	case H_SPEED_1G_F:
961 		port->port_speed = EHEA_SPEED_1G;
962 		port->full_duplex = 1;
963 		break;
964 	case H_SPEED_10G_F:
965 		port->port_speed = EHEA_SPEED_10G;
966 		port->full_duplex = 1;
967 		break;
968 	default:
969 		port->port_speed = 0;
970 		port->full_duplex = 0;
971 		break;
972 	}
973 
974 	port->autoneg = 1;
975 	port->num_mcs = cb0->num_default_qps;
976 
977 	/* Number of default QPs */
978 	if (use_mcs)
979 		port->num_def_qps = cb0->num_default_qps;
980 	else
981 		port->num_def_qps = 1;
982 
983 	if (!port->num_def_qps) {
984 		ret = -EINVAL;
985 		goto out_free;
986 	}
987 
988 	port->num_tx_qps = num_tx_qps;
989 
990 	if (port->num_def_qps >= port->num_tx_qps)
991 		port->num_add_tx_qps = 0;
992 	else
993 		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
994 
995 	ret = 0;
996 out_free:
997 	if (ret || netif_msg_probe(port))
998 		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
999 	kfree(cb0);
1000 out:
1001 	return ret;
1002 }
1003 
1004 int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1005 {
1006 	struct hcp_ehea_port_cb4 *cb4;
1007 	u64 hret;
1008 	int ret = 0;
1009 
1010 	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1011 	if (!cb4) {
1012 		ehea_error("no mem for cb4");
1013 		ret = -ENOMEM;
1014 		goto out;
1015 	}
1016 
1017 	cb4->port_speed = port_speed;
1018 
1019 	netif_carrier_off(port->netdev);
1020 
1021 	hret = ehea_h_modify_ehea_port(port->adapter->handle,
1022 				       port->logical_port_id,
1023 				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
1024 	if (hret == H_SUCCESS) {
1025 		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;
1026 
1027 		hret = ehea_h_query_ehea_port(port->adapter->handle,
1028 					      port->logical_port_id,
1029 					      H_PORT_CB4, H_PORT_CB4_SPEED,
1030 					      cb4);
1031 		if (hret == H_SUCCESS) {
1032 			switch (cb4->port_speed) {
1033 			case H_SPEED_10M_H:
1034 				port->port_speed = EHEA_SPEED_10M;
1035 				port->full_duplex = 0;
1036 				break;
1037 			case H_SPEED_10M_F:
1038 				port->port_speed = EHEA_SPEED_10M;
1039 				port->full_duplex = 1;
1040 				break;
1041 			case H_SPEED_100M_H:
1042 				port->port_speed = EHEA_SPEED_100M;
1043 				port->full_duplex = 0;
1044 				break;
1045 			case H_SPEED_100M_F:
1046 				port->port_speed = EHEA_SPEED_100M;
1047 				port->full_duplex = 1;
1048 				break;
1049 			case H_SPEED_1G_F:
1050 				port->port_speed = EHEA_SPEED_1G;
1051 				port->full_duplex = 1;
1052 				break;
1053 			case H_SPEED_10G_F:
1054 				port->port_speed = EHEA_SPEED_10G;
1055 				port->full_duplex = 1;
1056 				break;
1057 			default:
1058 				port->port_speed = 0;
1059 				port->full_duplex = 0;
1060 				break;
1061 			}
1062 		} else {
1063 			ehea_error("Failed sensing port speed");
1064 			ret = -EIO;
1065 		}
1066 	} else {
1067 		if (hret == H_AUTHORITY) {
1068 			ehea_info("Hypervisor denied setting port speed");
1069 			ret = -EPERM;
1070 		} else {
1071 			ret = -EIO;
1072 			ehea_error("Failed setting port speed");
1073 		}
1074 	}
1075 	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
1076 		netif_carrier_on(port->netdev);
1077 
1078 	kfree(cb4);
1079 out:
1080 	return ret;
1081 }
1082 
1083 static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
1084 {
1085 	int ret;
1086 	u8 ec;
1087 	u8 portnum;
1088 	struct ehea_port *port;
1089 
1090 	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
1091 	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
1092 	port = ehea_get_port(adapter, portnum);
1093 
1094 	switch (ec) {
1095 	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
1096 
1097 		if (!port) {
1098 			ehea_error("unknown portnum %x", portnum);
1099 			break;
1100 		}
1101 
1102 		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
1103 			if (!netif_carrier_ok(port->netdev)) {
1104 				ret = ehea_sense_port_attr(port);
1105 				if (ret) {
1106 					ehea_error("failed resensing port "
1107 						   "attributes");
1108 					break;
1109 				}
1110 
1111 				if (netif_msg_link(port))
1112 					ehea_info("%s: Logical port up: %dMbps "
1113 						  "%s Duplex",
1114 						  port->netdev->name,
1115 						  port->port_speed,
1116 						  port->full_duplex ==
1117 						  1 ? "Full" : "Half");
1118 
1119 				netif_carrier_on(port->netdev);
1120 				netif_wake_queue(port->netdev);
1121 			}
1122 		} else
1123 			if (netif_carrier_ok(port->netdev)) {
1124 				if (netif_msg_link(port))
1125 					ehea_info("%s: Logical port down",
1126 						  port->netdev->name);
1127 				netif_carrier_off(port->netdev);
1128 				netif_stop_queue(port->netdev);
1129 			}
1130 
1131 		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
1132 			port->phy_link = EHEA_PHY_LINK_UP;
1133 			if (netif_msg_link(port))
1134 				ehea_info("%s: Physical port up",
1135 					  port->netdev->name);
1136 			if (prop_carrier_state)
1137 				netif_carrier_on(port->netdev);
1138 		} else {
1139 			port->phy_link = EHEA_PHY_LINK_DOWN;
1140 			if (netif_msg_link(port))
1141 				ehea_info("%s: Physical port down",
1142 					  port->netdev->name);
1143 			if (prop_carrier_state)
1144 				netif_carrier_off(port->netdev);
1145 		}
1146 
1147 		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
1148 			ehea_info("External switch port is primary port");
1149 		else
1150 			ehea_info("External switch port is backup port");
1151 
1152 		break;
1153 	case EHEA_EC_ADAPTER_MALFUNC:
1154 		ehea_error("Adapter malfunction");
1155 		break;
1156 	case EHEA_EC_PORT_MALFUNC:
1157 		ehea_info("Port malfunction: Device: %s", port->netdev->name);
1158 		netif_carrier_off(port->netdev);
1159 		netif_stop_queue(port->netdev);
1160 		break;
1161 	default:
1162 		ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
1163 		break;
1164 	}
1165 }
1166 
1167 static void ehea_neq_tasklet(unsigned long data)
1168 {
1169 	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
1170 	struct ehea_eqe *eqe;
1171 	u64 event_mask;
1172 
1173 	eqe = ehea_poll_eq(adapter->neq);
1174 	ehea_debug("eqe=%p", eqe);
1175 
1176 	while (eqe) {
1177 		ehea_debug("*eqe=%lx", eqe->entry);
1178 		ehea_parse_eqe(adapter, eqe->entry);
1179 		eqe = ehea_poll_eq(adapter->neq);
1180 		ehea_debug("next eqe=%p", eqe);
1181 	}
1182 
1183 	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
1184 		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
1185 		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);
1186 
1187 	ehea_h_reset_events(adapter->handle,
1188 			    adapter->neq->fw_handle, event_mask);
1189 }
1190 
1191 static irqreturn_t ehea_interrupt_neq(int irq, void *param)
1192 {
1193 	struct ehea_adapter *adapter = param;
1194 	tasklet_hi_schedule(&adapter->neq_tasklet);
1195 	return IRQ_HANDLED;
1196 }
1197 
1198 
1199 static int ehea_fill_port_res(struct ehea_port_res *pr)
1200 {
1201 	int ret;
1202 	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
1203 
1204 	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
1205 				     - init_attr->act_nr_rwqes_rq2
1206 				     - init_attr->act_nr_rwqes_rq3 - 1);
1207 
1208 	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
1209 
1210 	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
1211 
1212 	return ret;
1213 }
1214 
1215 static int ehea_reg_interrupts(struct net_device *dev)
1216 {
1217 	struct ehea_port *port = netdev_priv(dev);
1218 	struct ehea_port_res *pr;
1219 	int i, ret;
1220 
1221 
1222 	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
1223 		 dev->name);
1224 
1225 	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
1226 				  ehea_qp_aff_irq_handler,
1227 				  IRQF_DISABLED, port->int_aff_name, port);
1228 	if (ret) {
1229 		ehea_error("failed registering irq for qp_aff_irq_handler:"
1230 			   "ist=%X", port->qp_eq->attr.ist1);
1231 		goto out_free_qpeq;
1232 	}
1233 
1234 	if (netif_msg_ifup(port))
1235 		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
1236 			  "registered", port->qp_eq->attr.ist1);
1237 
1238 
1239 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
1240 		pr = &port->port_res[i];
1241 		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
1242 			 "%s-queue%d", dev->name, i);
1243 		ret = ibmebus_request_irq(pr->eq->attr.ist1,
1244 					  ehea_recv_irq_handler,
1245 					  IRQF_DISABLED, pr->int_send_name,
1246 					  pr);
1247 		if (ret) {
1248 			ehea_error("failed registering irq for ehea_queue "
1249 				   "port_res_nr:%d, ist=%X", i,
1250 				   pr->eq->attr.ist1);
1251 			goto out_free_req;
1252 		}
1253 		if (netif_msg_ifup(port))
1254 			ehea_info("irq_handle 0x%X for function ehea_queue_int "
1255 				  "%d registered", pr->eq->attr.ist1, i);
1256 	}
1257 out:
1258 	return ret;
1259 
1260 
1261 out_free_req:
1262 	while (--i >= 0) {
1263 		u32 ist = port->port_res[i].eq->attr.ist1;
1264 		ibmebus_free_irq(ist, &port->port_res[i]);
1265 	}
1266 
1267 out_free_qpeq:
1268 	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1269 	i = port->num_def_qps;
1270 
1271 	goto out;
1272 
1273 }
1274 
1275 static void ehea_free_interrupts(struct net_device *dev)
1276 {
1277 	struct ehea_port *port = netdev_priv(dev);
1278 	struct ehea_port_res *pr;
1279 	int i;
1280 
1281 	/* send */
1282 
1283 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
1284 		pr = &port->port_res[i];
1285 		ibmebus_free_irq(pr->eq->attr.ist1, pr);
1286 		if (netif_msg_intr(port))
1287 			ehea_info("free send irq for res %d with handle 0x%X",
1288 				  i, pr->eq->attr.ist1);
1289 	}
1290 
1291 	/* associated events */
1292 	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1293 	if (netif_msg_intr(port))
1294 		ehea_info("associated event interrupt for handle 0x%X freed",
1295 			  port->qp_eq->attr.ist1);
1296 }
1297 
1298 static int ehea_configure_port(struct ehea_port *port)
1299 {
1300 	int ret, i;
1301 	u64 hret, mask;
1302 	struct hcp_ehea_port_cb0 *cb0;
1303 
1304 	ret = -ENOMEM;
1305 	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1306 	if (!cb0)
1307 		goto out;
1308 
1309 	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
1310 		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
1311 		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
1312 		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
1313 		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
1314 				      PXLY_RC_VLAN_FILTER)
1315 		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
1316 
1317 	for (i = 0; i < port->num_mcs; i++)
1318 		if (use_mcs)
1319 			cb0->default_qpn_arr[i] =
1320 				port->port_res[i].qp->init_attr.qp_nr;
1321 		else
1322 			cb0->default_qpn_arr[i] =
1323 				port->port_res[0].qp->init_attr.qp_nr;
1324 
1325 	if (netif_msg_ifup(port))
1326 		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
1327 
1328 	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
1329 	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);
1330 
1331 	hret = ehea_h_modify_ehea_port(port->adapter->handle,
1332 				       port->logical_port_id,
1333 				       H_PORT_CB0, mask, cb0);
1334 	ret = -EIO;
1335 	if (hret != H_SUCCESS)
1336 		goto out_free;
1337 
1338 	ret = 0;
1339 
1340 out_free:
1341 	kfree(cb0);
1342 out:
1343 	return ret;
1344 }
1345 
1346 int ehea_gen_smrs(struct ehea_port_res *pr)
1347 {
1348 	int ret;
1349 	struct ehea_adapter *adapter = pr->port->adapter;
1350 
1351 	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
1352 	if (ret)
1353 		goto out;
1354 
1355 	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
1356 	if (ret)
1357 		goto out_free;
1358 
1359 	return 0;
1360 
1361 out_free:
1362 	ehea_rem_mr(&pr->send_mr);
1363 out:
1364 	ehea_error("Generating SMRS failed\n");
1365 	return -EIO;
1366 }
1367 
1368 int ehea_rem_smrs(struct ehea_port_res *pr)
1369 {
1370 	if ((ehea_rem_mr(&pr->send_mr))
1371 	    || (ehea_rem_mr(&pr->recv_mr)))
1372 		return -EIO;
1373 	else
1374 		return 0;
1375 }
1376 
1377 static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
1378 {
1379 	int arr_size = sizeof(void *) * max_q_entries;
1380 
1381 	q_skba->arr = vmalloc(arr_size);
1382 	if (!q_skba->arr)
1383 		return -ENOMEM;
1384 
1385 	memset(q_skba->arr, 0, arr_size);
1386 
1387 	q_skba->len = max_q_entries;
1388 	q_skba->index = 0;
1389 	q_skba->os_skbs = 0;
1390 
1391 	return 0;
1392 }
1393 
1394 static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1395 			      struct port_res_cfg *pr_cfg, int queue_token)
1396 {
1397 	struct ehea_adapter *adapter = port->adapter;
1398 	enum ehea_eq_type eq_type = EHEA_EQ;
1399 	struct ehea_qp_init_attr *init_attr = NULL;
1400 	int ret = -EIO;
1401 
1402 	memset(pr, 0, sizeof(struct ehea_port_res));
1403 
1404 	pr->port = port;
1405 	spin_lock_init(&pr->xmit_lock);
1406 	spin_lock_init(&pr->netif_queue);
1407 
1408 	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1409 	if (!pr->eq) {
1410 		ehea_error("create_eq failed (eq)");
1411 		goto out_free;
1412 	}
1413 
1414 	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
1415 				     pr->eq->fw_handle,
1416 				     port->logical_port_id);
1417 	if (!pr->recv_cq) {
1418 		ehea_error("create_cq failed (cq_recv)");
1419 		goto out_free;
1420 	}
1421 
1422 	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
1423 				     pr->eq->fw_handle,
1424 				     port->logical_port_id);
1425 	if (!pr->send_cq) {
1426 		ehea_error("create_cq failed (cq_send)");
1427 		goto out_free;
1428 	}
1429 
1430 	if (netif_msg_ifup(port))
1431 		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
1432 			  pr->send_cq->attr.act_nr_of_cqes,
1433 			  pr->recv_cq->attr.act_nr_of_cqes);
1434 
1435 	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1436 	if (!init_attr) {
1437 		ret = -ENOMEM;
1438 		ehea_error("no mem for ehea_qp_init_attr");
1439 		goto out_free;
1440 	}
1441 
1442 	init_attr->low_lat_rq1 = 1;
1443 	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
1444 	init_attr->rq_count = 3;
1445 	init_attr->qp_token = queue_token;
1446 	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
1447 	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
1448 	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
1449 	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
1450 	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
1451 	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
1452 	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
1453 	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
1454 	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
1455 	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
1456 	init_attr->port_nr = port->logical_port_id;
1457 	init_attr->send_cq_handle = pr->send_cq->fw_handle;
1458 	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
1459 	init_attr->aff_eq_handle = port->qp_eq->fw_handle;
1460 
1461 	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1462 	if (!pr->qp) {
1463 		ehea_error("create_qp failed");
1464 		ret = -EIO;
1465 		goto out_free;
1466 	}
1467 
1468 	if (netif_msg_ifup(port))
1469 		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
1470 			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
1471 			  init_attr->act_nr_send_wqes,
1472 			  init_attr->act_nr_rwqes_rq1,
1473 			  init_attr->act_nr_rwqes_rq2,
1474 			  init_attr->act_nr_rwqes_rq3);
1475 
1476 	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1477 
1478 	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
1479 	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
1480 	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
1481 	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
1482 	if (ret)
1483 		goto out_free;
1484 
1485 	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
1486 	if (ehea_gen_smrs(pr) != 0) {
1487 		ret = -EIO;
1488 		goto out_free;
1489 	}
1490 
1491 	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
1492 
1493 	kfree(init_attr);
1494 
1495 	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
1496 
1497 	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
1498 	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
1499 	pr->lro_mgr.lro_arr = pr->lro_desc;
1500 	pr->lro_mgr.get_skb_header = get_skb_hdr;
1501 	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1502 	pr->lro_mgr.dev = port->netdev;
1503 	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1504 	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1505 
1506 	ret = 0;
1507 	goto out;
1508 
1509 out_free:
1510 	kfree(init_attr);
1511 	vfree(pr->sq_skba.arr);
1512 	vfree(pr->rq1_skba.arr);
1513 	vfree(pr->rq2_skba.arr);
1514 	vfree(pr->rq3_skba.arr);
1515 	ehea_destroy_qp(pr->qp);
1516 	ehea_destroy_cq(pr->send_cq);
1517 	ehea_destroy_cq(pr->recv_cq);
1518 	ehea_destroy_eq(pr->eq);
1519 out:
1520 	return ret;
1521 }
1522 
1523 static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1524 {
1525 	int ret, i;
1526 
1527 	ret = ehea_destroy_qp(pr->qp);
1528 
1529 	if (!ret) {
1530 		ehea_destroy_cq(pr->send_cq);
1531 		ehea_destroy_cq(pr->recv_cq);
1532 		ehea_destroy_eq(pr->eq);
1533 
1534 		for (i = 0; i < pr->rq1_skba.len; i++)
1535 			if (pr->rq1_skba.arr[i])
1536 				dev_kfree_skb(pr->rq1_skba.arr[i]);
1537 
1538 		for (i = 0; i < pr->rq2_skba.len; i++)
1539 			if (pr->rq2_skba.arr[i])
1540 				dev_kfree_skb(pr->rq2_skba.arr[i]);
1541 
1542 		for (i = 0; i < pr->rq3_skba.len; i++)
1543 			if (pr->rq3_skba.arr[i])
1544 				dev_kfree_skb(pr->rq3_skba.arr[i]);
1545 
1546 		for (i = 0; i < pr->sq_skba.len; i++)
1547 			if (pr->sq_skba.arr[i])
1548 				dev_kfree_skb(pr->sq_skba.arr[i]);
1549 
1550 		vfree(pr->rq1_skba.arr);
1551 		vfree(pr->rq2_skba.arr);
1552 		vfree(pr->rq3_skba.arr);
1553 		vfree(pr->sq_skba.arr);
1554 		ret = ehea_rem_smrs(pr);
1555 	}
1556 	return ret;
1557 }
1558 
1559 /*
1560  * The write_* functions store information in swqe which is used by
1561  * the hardware to calculate the ip/tcp/udp checksum
1562  */
1563 
1564 static inline void write_ip_start_end(struct ehea_swqe *swqe,
1565 				      const struct sk_buff *skb)
1566 {
1567 	swqe->ip_start = skb_network_offset(skb);
1568 	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
1569 }
1570 
1571 static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
1572 					const struct sk_buff *skb)
1573 {
1574 	swqe->tcp_offset =
1575 		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
1576 
1577 	swqe->tcp_end = (u16)skb->len - 1;
1578 }
1579 
1580 static inline void write_udp_offset_end(struct ehea_swqe *swqe,
1581 					const struct sk_buff *skb)
1582 {
1583 	swqe->tcp_offset =
1584 		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
1585 
1586 	swqe->tcp_end = (u16)skb->len - 1;
1587 }
1588 
1589 
1590 static void write_swqe2_TSO(struct sk_buff *skb,
1591 			    struct ehea_swqe *swqe, u32 lkey)
1592 {
1593 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1594 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1595 	int skb_data_size = skb->len - skb->data_len;
1596 	int headersize;
1597 
1598 	/* Packet is TCP with TSO enabled */
1599 	swqe->tx_control |= EHEA_SWQE_TSO;
1600 	swqe->mss = skb_shinfo(skb)->gso_size;
1601 	/* copy only eth/ip/tcp headers to immediate data and
1602 	 * the rest of skb->data to sg1entry
1603 	 */
1604 	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1605 
1606 	skb_data_size = skb->len - skb->data_len;
1607 
1608 	if (skb_data_size >= headersize) {
1609 		/* copy immediate data */
1610 		skb_copy_from_linear_data(skb, imm_data, headersize);
1611 		swqe->immediate_data_length = headersize;
1612 
1613 		if (skb_data_size > headersize) {
1614 			/* set sg1entry data */
1615 			sg1entry->l_key = lkey;
1616 			sg1entry->len = skb_data_size - headersize;
1617 			sg1entry->vaddr =
1618 				ehea_map_vaddr(skb->data + headersize);
1619 			swqe->descriptors++;
1620 		}
1621 	} else
1622 		ehea_error("cannot handle fragmented headers");
1623 }
1624 
1625 static void write_swqe2_nonTSO(struct sk_buff *skb,
1626 			       struct ehea_swqe *swqe, u32 lkey)
1627 {
1628 	int skb_data_size = skb->len - skb->data_len;
1629 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1630 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1631 
1632 	/* Packet is any nonTSO type
1633 	 *
1634 	 * Copy as much as possible skb->data to immediate data and
1635 	 * the rest to sg1entry
1636 	 */
1637 	if (skb_data_size >= SWQE2_MAX_IMM) {
1638 		/* copy immediate data */
1639 		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
1640 
1641 		swqe->immediate_data_length = SWQE2_MAX_IMM;
1642 
1643 		if (skb_data_size > SWQE2_MAX_IMM) {
1644 			/* copy sg1entry data */
1645 			sg1entry->l_key = lkey;
1646 			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
1647 			sg1entry->vaddr =
1648 				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
1649 			swqe->descriptors++;
1650 		}
1651 	} else {
1652 		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1653 		swqe->immediate_data_length = skb_data_size;
1654 	}
1655 }
1656 
1657 static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1658 				    struct ehea_swqe *swqe, u32 lkey)
1659 {
1660 	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1661 	skb_frag_t *frag;
1662 	int nfrags, sg1entry_contains_frag_data, i;
1663 
1664 	nfrags = skb_shinfo(skb)->nr_frags;
1665 	sg1entry = &swqe->u.immdata_desc.sg_entry;
1666 	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
1667 	swqe->descriptors = 0;
1668 	sg1entry_contains_frag_data = 0;
1669 
1670 	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
1671 		write_swqe2_TSO(skb, swqe, lkey);
1672 	else
1673 		write_swqe2_nonTSO(skb, swqe, lkey);
1674 
1675 	/* write descriptors */
1676 	if (nfrags > 0) {
1677 		if (swqe->descriptors == 0) {
1678 			/* sg1entry not yet used */
1679 			frag = &skb_shinfo(skb)->frags[0];
1680 
1681 			/* copy sg1entry data */
1682 			sg1entry->l_key = lkey;
1683 			sg1entry->len = frag->size;
1684 			sg1entry->vaddr =
1685 				ehea_map_vaddr(page_address(frag->page)
1686 					       + frag->page_offset);
1687 			swqe->descriptors++;
1688 			sg1entry_contains_frag_data = 1;
1689 		}
1690 
1691 		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1692 
1693 			frag = &skb_shinfo(skb)->frags[i];
1694 			sgentry = &sg_list[i - sg1entry_contains_frag_data];
1695 
1696 			sgentry->l_key = lkey;
1697 			sgentry->len = frag->size;
1698 			sgentry->vaddr =
1699 				ehea_map_vaddr(page_address(frag->page)
1700 					       + frag->page_offset);
1701 			swqe->descriptors++;
1702 		}
1703 	}
1704 }
1705 
1706 static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1707 {
1708 	int ret = 0;
1709 	u64 hret;
1710 	u8 reg_type;
1711 
1712 	/* De/Register untagged packets */
1713 	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1714 	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1715 				     port->logical_port_id,
1716 				     reg_type, port->mac_addr, 0, hcallid);
1717 	if (hret != H_SUCCESS) {
1718 		ehea_error("%sregistering bc address failed (tagged)",
1719 			   hcallid == H_REG_BCMC ? "" : "de");
1720 		ret = -EIO;
1721 		goto out_herr;
1722 	}
1723 
1724 	/* De/Register VLAN packets */
1725 	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1726 	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1727 				     port->logical_port_id,
1728 				     reg_type, port->mac_addr, 0, hcallid);
1729 	if (hret != H_SUCCESS) {
1730 		ehea_error("%sregistering bc address failed (vlan)",
1731 			   hcallid == H_REG_BCMC ? "" : "de");
1732 		ret = -EIO;
1733 	}
1734 out_herr:
1735 	return ret;
1736 }
1737 
1738 static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1739 {
1740 	struct ehea_port *port = netdev_priv(dev);
1741 	struct sockaddr *mac_addr = sa;
1742 	struct hcp_ehea_port_cb0 *cb0;
1743 	int ret;
1744 	u64 hret;
1745 
1746 	if (!is_valid_ether_addr(mac_addr->sa_data)) {
1747 		ret = -EADDRNOTAVAIL;
1748 		goto out;
1749 	}
1750 
1751 	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1752 	if (!cb0) {
1753 		ehea_error("no mem for cb0");
1754 		ret = -ENOMEM;
1755 		goto out;
1756 	}
1757 
1758 	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1759 
1760 	cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1761 
1762 	hret = ehea_h_modify_ehea_port(port->adapter->handle,
1763 				       port->logical_port_id, H_PORT_CB0,
1764 				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1765 	if (hret != H_SUCCESS) {
1766 		ret = -EIO;
1767 		goto out_free;
1768 	}
1769 
1770 	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1771 
1772 	spin_lock(&ehea_bcmc_regs.lock);
1773 
1774 	/* Deregister old MAC in pHYP */
1775 	if (port->state == EHEA_PORT_UP) {
1776 		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1777 		if (ret)
1778 			goto out_upregs;
1779 	}
1780 
1781 	port->mac_addr = cb0->port_mac_addr << 16;
1782 
1783 	/* Register new MAC in pHYP */
1784 	if (port->state == EHEA_PORT_UP) {
1785 		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1786 		if (ret)
1787 			goto out_upregs;
1788 	}
1789 
1790 	ret = 0;
1791 
1792 out_upregs:
1793 	ehea_update_bcmc_registrations();
1794 	spin_unlock(&ehea_bcmc_regs.lock);
1795 out_free:
1796 	kfree(cb0);
1797 out:
1798 	return ret;
1799 }
1800 
1801 static void ehea_promiscuous_error(u64 hret, int enable)
1802 {
1803 	if (hret == H_AUTHORITY)
1804 		ehea_info("Hypervisor denied %sabling promiscuous mode",
1805 			  enable == 1 ? "en" : "dis");
1806 	else
1807 		ehea_error("failed %sabling promiscuous mode",
1808 			   enable == 1 ? "en" : "dis");
1809 }
1810 
1811 static void ehea_promiscuous(struct net_device *dev, int enable)
1812 {
1813 	struct ehea_port *port = netdev_priv(dev);
1814 	struct hcp_ehea_port_cb7 *cb7;
1815 	u64 hret;
1816 
1817 	if ((enable && port->promisc) || (!enable && !port->promisc))
1818 		return;
1819 
1820 	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
1821 	if (!cb7) {
1822 		ehea_error("no mem for cb7");
1823 		goto out;
1824 	}
1825 
1826 	/* Modify Pxs_DUCQPN in CB7 */
1827 	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1828 
1829 	hret = ehea_h_modify_ehea_port(port->adapter->handle,
1830 				       port->logical_port_id,
1831 				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1832 	if (hret) {
1833 		ehea_promiscuous_error(hret, enable);
1834 		goto out;
1835 	}
1836 
1837 	port->promisc = enable;
1838 out:
1839 	kfree(cb7);
1840 	return;
1841 }
1842 
1843 static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1844 				     u32 hcallid)
1845 {
1846 	u64 hret;
1847 	u8 reg_type;
1848 
1849 	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1850 		 | EHEA_BCMC_UNTAGGED;
1851 
1852 	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1853 				     port->logical_port_id,
1854 				     reg_type, mc_mac_addr, 0, hcallid);
1855 	if (hret)
1856 		goto out;
1857 
1858 	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1859 		 | EHEA_BCMC_VLANID_ALL;
1860 
1861 	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1862 				     port->logical_port_id,
1863 				     reg_type, mc_mac_addr, 0, hcallid);
1864 out:
1865 	return hret;
1866 }
1867 
1868 static int ehea_drop_multicast_list(struct net_device *dev)
1869 {
1870 	struct ehea_port *port = netdev_priv(dev);
1871 	struct ehea_mc_list *mc_entry = port->mc_list;
1872 	struct list_head *pos;
1873 	struct list_head *temp;
1874 	int ret = 0;
1875 	u64 hret;
1876 
1877 	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1878 		mc_entry = list_entry(pos, struct ehea_mc_list, list);
1879 
1880 		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1881 						 H_DEREG_BCMC);
1882 		if (hret) {
1883 			ehea_error("failed deregistering mcast MAC");
1884 			ret = -EIO;
1885 		}
1886 
1887 		list_del(pos);
1888 		kfree(mc_entry);
1889 	}
1890 	return ret;
1891 }
1892 
1893 static void ehea_allmulti(struct net_device *dev, int enable)
1894 {
1895 	struct ehea_port *port = netdev_priv(dev);
1896 	u64 hret;
1897 
1898 	if (!port->allmulti) {
1899 		if (enable) {
1900 			/* Enable ALLMULTI */
1901 			ehea_drop_multicast_list(dev);
1902 			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1903 			if (!hret)
1904 				port->allmulti = 1;
1905 			else
1906 				ehea_error("failed enabling IFF_ALLMULTI");
1907 		}
1908 	} else
1909 		if (!enable) {
1910 			/* Disable ALLMULTI */
1911 			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
1912 			if (!hret)
1913 				port->allmulti = 0;
1914 			else
1915 				ehea_error("failed disabling IFF_ALLMULTI");
1916 		}
1917 }
1918 
1919 static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1920 {
1921 	struct ehea_mc_list *ehea_mcl_entry;
1922 	u64 hret;
1923 
1924 	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1925 	if (!ehea_mcl_entry) {
1926 		ehea_error("no mem for mcl_entry");
1927 		return;
1928 	}
1929 
1930 	INIT_LIST_HEAD(&ehea_mcl_entry->list);
1931 
1932 	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1933 
1934 	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1935 					 H_REG_BCMC);
1936 	if (!hret)
1937 		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1938 	else {
1939 		ehea_error("failed registering mcast MAC");
1940 		kfree(ehea_mcl_entry);
1941 	}
1942 }
1943 
1944 static void ehea_set_multicast_list(struct net_device *dev)
1945 {
1946 	struct ehea_port *port = netdev_priv(dev);
1947 	struct dev_mc_list *k_mcl_entry;
1948 	int ret, i;
1949 
1950 	if (dev->flags & IFF_PROMISC) {
1951 		ehea_promiscuous(dev, 1);
1952 		return;
1953 	}
1954 	ehea_promiscuous(dev, 0);
1955 
1956 	spin_lock(&ehea_bcmc_regs.lock);
1957 
1958 	if (dev->flags & IFF_ALLMULTI) {
1959 		ehea_allmulti(dev, 1);
1960 		goto out;
1961 	}
1962 	ehea_allmulti(dev, 0);
1963 
1964 	if (dev->mc_count) {
1965 		ret = ehea_drop_multicast_list(dev);
1966 		if (ret) {
1967 			/* Dropping the current multicast list failed.
1968 			 * Enabling ALL_MULTI is the best we can do.
1969 			 */
1970 			ehea_allmulti(dev, 1);
1971 		}
1972 
1973 		if (dev->mc_count > port->adapter->max_mc_mac) {
1974 			ehea_info("Mcast registration limit reached (0x%llx). "
1975 				  "Use ALLMULTI!",
1976 				  port->adapter->max_mc_mac);
1977 			goto out;
1978 		}
1979 
1980 		for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
1981 			     k_mcl_entry = k_mcl_entry->next)
1982 			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
1983 
1984 	}
1985 out:
1986 	ehea_update_bcmc_registrations();
1987 	spin_unlock(&ehea_bcmc_regs.lock);
1988 	return;
1989 }
1990 
1991 static int ehea_change_mtu(struct net_device *dev, int new_mtu)
1992 {
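	/* 68 bytes is the minimum MTU an IPv4 host must support (RFC 791). */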
1993 	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
1994 		return -EINVAL;
1995 	dev->mtu = new_mtu;
1996 	return 0;
1997 }
1998 
1999 static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
2000 		       struct ehea_swqe *swqe, u32 lkey)
2001 {
2002 	if (skb->protocol == htons(ETH_P_IP)) {
2003 		const struct iphdr *iph = ip_hdr(skb);
2004 
2005 		/* IPv4 */
2006 		swqe->tx_control |= EHEA_SWQE_CRC
2007 				 | EHEA_SWQE_IP_CHECKSUM
2008 				 | EHEA_SWQE_TCP_CHECKSUM
2009 				 | EHEA_SWQE_IMM_DATA_PRESENT
2010 				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
2011 
2012 		write_ip_start_end(swqe, skb);
2013 
2014 		if (iph->protocol == IPPROTO_UDP) {
2015 			if ((iph->frag_off & IP_MF)
2016 			    || (iph->frag_off & IP_OFFSET))
2017 				/* IP fragment, so don't change cs */
2018 				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
2019 			else
2020 				write_udp_offset_end(swqe, skb);
2021 		} else if (iph->protocol == IPPROTO_TCP) {
2022 			write_tcp_offset_end(swqe, skb);
2023 		}
2024 
2025 		/* icmp (big data) and ip segmentation packets (all other ip
2026 		   packets) do not require any special handling */
2027 
2028 	} else {
2029 		/* Other Ethernet Protocol */
2030 		swqe->tx_control |= EHEA_SWQE_CRC
2031 				 | EHEA_SWQE_IMM_DATA_PRESENT
2032 				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
2033 	}
2034 
2035 	write_swqe2_data(skb, dev, swqe, lkey);
2036 }
2037 
2038 static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2039 		       struct ehea_swqe *swqe)
2040 {
2041 	int nfrags = skb_shinfo(skb)->nr_frags;
2042 	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2043 	skb_frag_t *frag;
2044 	int i;
2045 
2046 	if (skb->protocol == htons(ETH_P_IP)) {
2047 		const struct iphdr *iph = ip_hdr(skb);
2048 
2049 		/* IPv4 */
2050 		write_ip_start_end(swqe, skb);
2051 
2052 		if (iph->protocol == IPPROTO_TCP) {
2053 			swqe->tx_control |= EHEA_SWQE_CRC
2054 					 | EHEA_SWQE_IP_CHECKSUM
2055 					 | EHEA_SWQE_TCP_CHECKSUM
2056 					 | EHEA_SWQE_IMM_DATA_PRESENT;
2057 
2058 			write_tcp_offset_end(swqe, skb);
2059 
2060 		} else if (iph->protocol == IPPROTO_UDP) {
2061 			if ((iph->frag_off & IP_MF)
2062 			    || (iph->frag_off & IP_OFFSET))
2063 				/* IP fragment, so don't change cs */
2064 				swqe->tx_control |= EHEA_SWQE_CRC
2065 						 | EHEA_SWQE_IMM_DATA_PRESENT;
2066 			else {
2067 				swqe->tx_control |= EHEA_SWQE_CRC
2068 						 | EHEA_SWQE_IP_CHECKSUM
2069 						 | EHEA_SWQE_TCP_CHECKSUM
2070 						 | EHEA_SWQE_IMM_DATA_PRESENT;
2071 
2072 				write_udp_offset_end(swqe, skb);
2073 			}
2074 		} else {
2075 			/* icmp (big data) and
2076 			   ip segmentation packets (all other ip packets) */
2077 			swqe->tx_control |= EHEA_SWQE_CRC
2078 					 | EHEA_SWQE_IP_CHECKSUM
2079 					 | EHEA_SWQE_IMM_DATA_PRESENT;
2080 		}
2081 	} else {
2082 		/* Other Ethernet Protocol */
2083 		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
2084 	}
2085 	/* copy (immediate) data */
2086 	if (nfrags == 0) {
2087 		/* data is in a single piece */
2088 		skb_copy_from_linear_data(skb, imm_data, skb->len);
2089 	} else {
2090 		/* first copy data from the skb->data buffer ... */
2091 		skb_copy_from_linear_data(skb, imm_data,
2092 					  skb->len - skb->data_len);
2093 		imm_data += skb->len - skb->data_len;
2094 
2095 		/* ... then copy data from the fragments */
2096 		for (i = 0; i < nfrags; i++) {
2097 			frag = &skb_shinfo(skb)->frags[i];
2098 			memcpy(imm_data,
2099 			       page_address(frag->page) + frag->page_offset,
2100 			       frag->size);
2101 			imm_data += frag->size;
2102 		}
2103 	}
2104 	swqe->immediate_data_length = skb->len;
2105 	dev_kfree_skb(skb);
2106 }
2107 
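/* Select a TX queue for this skb: TCP flows are spread across the
 * available queue pairs by hashing the TCP ports and the destination IP
 * address; all other traffic uses queue 0.
 */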
2108 static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
2109 {
2110 	struct tcphdr *tcp;
2111 	u32 tmp;
2112 
2113 	if ((skb->protocol == htons(ETH_P_IP)) &&
2114 	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
2115 		tcp = (struct tcphdr *)(skb_network_header(skb) +
2116 					(ip_hdr(skb)->ihl * 4));
2117 		tmp = (tcp->source + (tcp->dest << 16)) % 31;
2118 		tmp += ip_hdr(skb)->daddr % 31;
2119 		return tmp % num_qps;
2120 	} else
2121 		return 0;
2122 }
2123 
2124 static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2125 {
2126 	struct ehea_port *port = netdev_priv(dev);
2127 	struct ehea_swqe *swqe;
2128 	unsigned long flags;
2129 	u32 lkey;
2130 	int swqe_index;
2131 	struct ehea_port_res *pr;
2132 
2133 	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
2134 
2135 	if (!spin_trylock(&pr->xmit_lock))
2136 		return NETDEV_TX_BUSY;
2137 
2138 	if (pr->queue_stopped) {
2139 		spin_unlock(&pr->xmit_lock);
2140 		return NETDEV_TX_BUSY;
2141 	}
2142 
2143 	swqe = ehea_get_swqe(pr->qp, &swqe_index);
2144 	memset(swqe, 0, SWQE_HEADER_SIZE);
2145 	atomic_dec(&pr->swqe_avail);
2146 
2147 	if (skb->len <= SWQE3_MAX_IMM) {
2148 		u32 sig_iv = port->sig_comp_iv;
2149 		u32 swqe_num = pr->swqe_id_counter;
2150 		ehea_xmit3(skb, dev, swqe);
2151 		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2152 			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
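		/* Request a signalled completion only on every sig_comp_iv-th
		 * immediate-data send to keep completion processing cheap.
		 */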
2153 		if (pr->swqe_ll_count >= (sig_iv - 1)) {
2154 			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2155 						      sig_iv);
2156 			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2157 			pr->swqe_ll_count = 0;
2158 		} else
2159 			pr->swqe_ll_count += 1;
2160 	} else {
2161 		swqe->wr_id =
2162 			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2163 		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
2164 		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2165 		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2166 		pr->sq_skba.arr[pr->sq_skba.index] = skb;
2167 
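		/* Masking wraps the ring index; this assumes sq_skba.len is
		 * a power of two.
		 */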
2168 		pr->sq_skba.index++;
2169 		pr->sq_skba.index &= (pr->sq_skba.len - 1);
2170 
2171 		lkey = pr->send_mr.lkey;
2172 		ehea_xmit2(skb, dev, swqe, lkey);
2173 		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2174 	}
2175 	pr->swqe_id_counter += 1;
2176 
2177 	if (port->vgrp && vlan_tx_tag_present(skb)) {
2178 		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2179 		swqe->vlan_tag = vlan_tx_tag_get(skb);
2180 	}
2181 
2182 	if (netif_msg_tx_queued(port)) {
2183 		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
2184 		ehea_dump(swqe, 512, "swqe");
2185 	}
2186 
2187 	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2188 		netif_stop_queue(dev);
2189 		swqe->tx_control |= EHEA_SWQE_PURGE;
2190 	}
2191 
2192 	ehea_post_swqe(pr->qp, swqe);
2193 	pr->tx_packets++;
2194 
2195 	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2196 		spin_lock_irqsave(&pr->netif_queue, flags);
2197 		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2198 			pr->p_stats.queue_stopped++;
2199 			netif_stop_queue(dev);
2200 			pr->queue_stopped = 1;
2201 		}
2202 		spin_unlock_irqrestore(&pr->netif_queue, flags);
2203 	}
2204 	dev->trans_start = jiffies;
2205 	spin_unlock(&pr->xmit_lock);
2206 
2207 	return NETDEV_TX_OK;
2208 }
2209 
2210 static void ehea_vlan_rx_register(struct net_device *dev,
2211 				  struct vlan_group *grp)
2212 {
2213 	struct ehea_port *port = netdev_priv(dev);
2214 	struct ehea_adapter *adapter = port->adapter;
2215 	struct hcp_ehea_port_cb1 *cb1;
2216 	u64 hret;
2217 
2218 	port->vgrp = grp;
2219 
2220 	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2221 	if (!cb1) {
2222 		ehea_error("no mem for cb1");
2223 		goto out;
2224 	}
2225 
2226 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2227 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2228 	if (hret != H_SUCCESS)
2229 		ehea_error("modify_ehea_port failed");
2230 
2231 	kfree(cb1);
2232 out:
2233 	return;
2234 }
2235 
2236 static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2237 {
2238 	struct ehea_port *port = netdev_priv(dev);
2239 	struct ehea_adapter *adapter = port->adapter;
2240 	struct hcp_ehea_port_cb1 *cb1;
2241 	int index;
2242 	u64 hret;
2243 
2244 	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2245 	if (!cb1) {
2246 		ehea_error("no mem for cb1");
2247 		goto out;
2248 	}
2249 
2250 	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2251 				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2252 	if (hret != H_SUCCESS) {
2253 		ehea_error("query_ehea_port failed");
2254 		goto out;
2255 	}
2256 
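	/* The VLAN filter is an array of 64-bit words indexed by vid / 64;
	 * set this VID's bit, counting from the most significant bit.
	 */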
2257 	index = (vid / 64);
2258 	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2259 
2260 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2261 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2262 	if (hret != H_SUCCESS)
2263 		ehea_error("modify_ehea_port failed");
2264 out:
2265 	kfree(cb1);
2266 	return;
2267 }
2268 
2269 static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2270 {
2271 	struct ehea_port *port = netdev_priv(dev);
2272 	struct ehea_adapter *adapter = port->adapter;
2273 	struct hcp_ehea_port_cb1 *cb1;
2274 	int index;
2275 	u64 hret;
2276 
2277 	vlan_group_set_device(port->vgrp, vid, NULL);
2278 
2279 	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2280 	if (!cb1) {
2281 		ehea_error("no mem for cb1");
2282 		goto out;
2283 	}
2284 
2285 	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2286 				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2287 	if (hret != H_SUCCESS) {
2288 		ehea_error("query_ehea_port failed");
2289 		goto out;
2290 	}
2291 
2292 	index = (vid / 64);
2293 	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2294 
2295 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2296 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2297 	if (hret != H_SUCCESS)
2298 		ehea_error("modify_ehea_port failed");
2299 out:
2300 	kfree(cb1);
2301 	return;
2302 }
2303 
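/* Step the queue pair through its firmware states (INITIALIZED, then
 * ENABLED, then ready-to-send), re-reading control block 0 before each
 * modify call.
 */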
2304 int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2305 {
2306 	int ret = -EIO;
2307 	u64 hret;
2308 	u16 dummy16 = 0;
2309 	u64 dummy64 = 0;
2310 	struct hcp_modify_qp_cb0 *cb0;
2311 
2312 	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2313 	if (!cb0) {
2314 		ret = -ENOMEM;
2315 		goto out;
2316 	}
2317 
2318 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2319 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2320 	if (hret != H_SUCCESS) {
2321 		ehea_error("query_ehea_qp failed (1)");
2322 		goto out;
2323 	}
2324 
2325 	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2326 	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2327 				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2328 				     &dummy64, &dummy64, &dummy16, &dummy16);
2329 	if (hret != H_SUCCESS) {
2330 		ehea_error("modify_ehea_qp failed (1)");
2331 		goto out;
2332 	}
2333 
2334 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2335 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2336 	if (hret != H_SUCCESS) {
2337 		ehea_error("query_ehea_qp failed (2)");
2338 		goto out;
2339 	}
2340 
2341 	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2342 	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2343 				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2344 				     &dummy64, &dummy64, &dummy16, &dummy16);
2345 	if (hret != H_SUCCESS) {
2346 		ehea_error("modify_ehea_qp failed (2)");
2347 		goto out;
2348 	}
2349 
2350 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2351 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2352 	if (hret != H_SUCCESS) {
2353 		ehea_error("query_ehea_qp failed (3)");
2354 		goto out;
2355 	}
2356 
2357 	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2358 	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2359 				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2360 				     &dummy64, &dummy64, &dummy16, &dummy16);
2361 	if (hret != H_SUCCESS) {
2362 		ehea_error("modify_ehea_qp failed (3)");
2363 		goto out;
2364 	}
2365 
2366 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2367 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2368 	if (hret != H_SUCCESS) {
2369 		ehea_error("query_ehea_qp failed (4)");
2370 		goto out;
2371 	}
2372 
2373 	ret = 0;
2374 out:
2375 	kfree(cb0);
2376 	return ret;
2377 }
2378 
2379 static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2380 			       int add_tx_qps)
2381 {
2382 	int ret, i;
2383 	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
2384 	enum ehea_eq_type eq_type = EHEA_EQ;
2385 
2386 	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2387 				   EHEA_MAX_ENTRIES_EQ, 1);
2388 	if (!port->qp_eq) {
2389 		ret = -EINVAL;
2390 		ehea_error("ehea_create_eq failed (qp_eq)");
2391 		goto out_kill_eq;
2392 	}
2393 
2394 	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2395 	pr_cfg.max_entries_scq = sq_entries * 2;
2396 	pr_cfg.max_entries_sq = sq_entries;
2397 	pr_cfg.max_entries_rq1 = rq1_entries;
2398 	pr_cfg.max_entries_rq2 = rq2_entries;
2399 	pr_cfg.max_entries_rq3 = rq3_entries;
2400 
2401 	pr_cfg_small_rx.max_entries_rcq = 1;
2402 	pr_cfg_small_rx.max_entries_scq = sq_entries;
2403 	pr_cfg_small_rx.max_entries_sq = sq_entries;
2404 	pr_cfg_small_rx.max_entries_rq1 = 1;
2405 	pr_cfg_small_rx.max_entries_rq2 = 1;
2406 	pr_cfg_small_rx.max_entries_rq3 = 1;
2407 
2408 	for (i = 0; i < def_qps; i++) {
2409 		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2410 		if (ret)
2411 			goto out_clean_pr;
2412 	}
2413 	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
2414 		ret = ehea_init_port_res(port, &port->port_res[i],
2415 					 &pr_cfg_small_rx, i);
2416 		if (ret)
2417 			goto out_clean_pr;
2418 	}
2419 
2420 	return 0;
2421 
2422 out_clean_pr:
2423 	while (--i >= 0)
2424 		ehea_clean_portres(port, &port->port_res[i]);
2425 
2426 out_kill_eq:
2427 	ehea_destroy_eq(port->qp_eq);
2428 	return ret;
2429 }
2430 
2431 static int ehea_clean_all_portres(struct ehea_port *port)
2432 {
2433 	int ret = 0;
2434 	int i;
2435 
2436 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2437 		ret |= ehea_clean_portres(port, &port->port_res[i]);
2438 
2439 	ret |= ehea_destroy_eq(port->qp_eq);
2440 
2441 	return ret;
2442 }
2443 
2444 static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2445 {
2446 	if (adapter->active_ports)
2447 		return;
2448 
2449 	ehea_rem_mr(&adapter->mr);
2450 }
2451 
2452 static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2453 {
2454 	if (adapter->active_ports)
2455 		return 0;
2456 
2457 	return ehea_reg_kernel_mr(adapter, &adapter->mr);
2458 }
2459 
2460 static int ehea_up(struct net_device *dev)
2461 {
2462 	int ret, i;
2463 	struct ehea_port *port = netdev_priv(dev);
2464 
2465 	if (port->state == EHEA_PORT_UP)
2466 		return 0;
2467 
2468 	mutex_lock(&ehea_fw_handles.lock);
2469 
2470 	ret = ehea_port_res_setup(port, port->num_def_qps,
2471 				  port->num_add_tx_qps);
2472 	if (ret) {
2473 		ehea_error("ehea_port_res_setup failed. ret:%d", ret);
2474 		goto out;
2475 	}
2476 
2477 	/* Set default QP for this port */
2478 	ret = ehea_configure_port(port);
2479 	if (ret) {
2480 		ehea_error("ehea_configure_port failed. ret:%d", ret);
2481 		goto out_clean_pr;
2482 	}
2483 
2484 	ret = ehea_reg_interrupts(dev);
2485 	if (ret) {
2486 		ehea_error("reg_interrupts failed. ret:%d", ret);
2487 		goto out_clean_pr;
2488 	}
2489 
2490 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2491 		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2492 		if (ret) {
2493 			ehea_error("activate_qp failed");
2494 			goto out_free_irqs;
2495 		}
2496 	}
2497 
2498 	for (i = 0; i < port->num_def_qps; i++) {
2499 		ret = ehea_fill_port_res(&port->port_res[i]);
2500 		if (ret) {
2501 			ehea_error("ehea_fill_port_res failed");
2502 			goto out_free_irqs;
2503 		}
2504 	}
2505 
2506 	spin_lock(&ehea_bcmc_regs.lock);
2507 
2508 	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2509 	if (ret) {
2510 		ret = -EIO;
2511 		goto out_free_irqs;
2512 	}
2513 
2514 	port->state = EHEA_PORT_UP;
2515 
2516 	ret = 0;
2517 	goto out;
2518 
2519 out_free_irqs:
2520 	ehea_free_interrupts(dev);
2521 
2522 out_clean_pr:
2523 	ehea_clean_all_portres(port);
2524 out:
2525 	if (ret)
2526 		ehea_info("Failed starting %s. ret=%i", dev->name, ret);
2527 
2528 	ehea_update_bcmc_registrations();
2529 	spin_unlock(&ehea_bcmc_regs.lock);
2530 
2531 	ehea_update_firmware_handles();
2532 	mutex_unlock(&ehea_fw_handles.lock);
2533 
2534 	return ret;
2535 }
2536 
2537 static void port_napi_disable(struct ehea_port *port)
2538 {
2539 	int i;
2540 
2541 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2542 		napi_disable(&port->port_res[i].napi);
2543 }
2544 
2545 static void port_napi_enable(struct ehea_port *port)
2546 {
2547 	int i;
2548 
2549 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2550 		napi_enable(&port->port_res[i].napi);
2551 }
2552 
2553 static int ehea_open(struct net_device *dev)
2554 {
2555 	int ret;
2556 	struct ehea_port *port = netdev_priv(dev);
2557 
2558 	mutex_lock(&port->port_lock);
2559 
2560 	if (netif_msg_ifup(port))
2561 		ehea_info("enabling port %s", dev->name);
2562 
2563 	ret = ehea_up(dev);
2564 	if (!ret) {
2565 		port_napi_enable(port);
2566 		netif_start_queue(dev);
2567 	}
2568 
2569 	mutex_unlock(&port->port_lock);
2570 
2571 	return ret;
2572 }
2573 
2574 static int ehea_down(struct net_device *dev)
2575 {
2576 	int ret;
2577 	struct ehea_port *port = netdev_priv(dev);
2578 
2579 	if (port->state == EHEA_PORT_DOWN)
2580 		return 0;
2581 
2582 	mutex_lock(&ehea_fw_handles.lock);
2583 
2584 	spin_lock(&ehea_bcmc_regs.lock);
2585 	ehea_drop_multicast_list(dev);
2586 	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2587 
2588 	ehea_free_interrupts(dev);
2589 
2590 	port->state = EHEA_PORT_DOWN;
2591 
2592 	ehea_update_bcmc_registrations();
2593 	spin_unlock(&ehea_bcmc_regs.lock);
2594 
2595 	ret = ehea_clean_all_portres(port);
2596 	if (ret)
2597 		ehea_info("Failed freeing resources for %s. ret=%i",
2598 			  dev->name, ret);
2599 
2600 	ehea_update_firmware_handles();
2601 	mutex_unlock(&ehea_fw_handles.lock);
2602 
2603 	return ret;
2604 }
2605 
2606 static int ehea_stop(struct net_device *dev)
2607 {
2608 	int ret;
2609 	struct ehea_port *port = netdev_priv(dev);
2610 
2611 	if (netif_msg_ifdown(port))
2612 		ehea_info("disabling port %s", dev->name);
2613 
2614 	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2615 	cancel_work_sync(&port->reset_task);
2616 	mutex_lock(&port->port_lock);
2617 	netif_stop_queue(dev);
2618 	port_napi_disable(port);
2619 	ret = ehea_down(dev);
2620 	mutex_unlock(&port->port_lock);
2621 	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2622 	return ret;
2623 }
2624 
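/* Mark every send WQE in the queue with the PURGE flag so outstanding
 * work is flushed rather than transmitted.
 */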
2625 static void ehea_purge_sq(struct ehea_qp *orig_qp)
2626 {
2627 	struct ehea_qp qp = *orig_qp;
2628 	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2629 	struct ehea_swqe *swqe;
2630 	int wqe_index;
2631 	int i;
2632 
2633 	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2634 		swqe = ehea_get_swqe(&qp, &wqe_index);
2635 		swqe->tx_control |= EHEA_SWQE_PURGE;
2636 	}
2637 }
2638 
2639 static void ehea_flush_sq(struct ehea_port *port)
2640 {
2641 	int i;
2642 
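	/* Poll each send queue until it has drained, giving up after
	 * 20 iterations of 5 ms (roughly 100 ms) per queue.
	 */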
2643 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2644 		struct ehea_port_res *pr = &port->port_res[i];
2645 		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2646 		int k = 0;
2647 		while (atomic_read(&pr->swqe_avail) < swqe_max) {
2648 			msleep(5);
2649 			if (++k == 20)
2650 				break;
2651 		}
2652 	}
2653 }
2654 
2655 int ehea_stop_qps(struct net_device *dev)
2656 {
2657 	struct ehea_port *port = netdev_priv(dev);
2658 	struct ehea_adapter *adapter = port->adapter;
2659 	struct hcp_modify_qp_cb0 *cb0;
2660 	int ret = -EIO;
2661 	int dret;
2662 	int i;
2663 	u64 hret;
2664 	u64 dummy64 = 0;
2665 	u16 dummy16 = 0;
2666 
2667 	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2668 	if (!cb0) {
2669 		ret = -ENOMEM;
2670 		goto out;
2671 	}
2672 
2673 	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2674 		struct ehea_port_res *pr =  &port->port_res[i];
2675 		struct ehea_qp *qp = pr->qp;
2676 
2677 		/* Purge send queue */
2678 		ehea_purge_sq(qp);
2679 
2680 		/* Disable queue pair */
2681 		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2682 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2683 					    cb0);
2684 		if (hret != H_SUCCESS) {
2685 			ehea_error("query_ehea_qp failed (1)");
2686 			goto out;
2687 		}
2688 
2689 		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2690 		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2691 
2692 		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2693 					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2694 							    1), cb0, &dummy64,
2695 					     &dummy64, &dummy16, &dummy16);
2696 		if (hret != H_SUCCESS) {
2697 			ehea_error("modify_ehea_qp failed (1)");
2698 			goto out;
2699 		}
2700 
2701 		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2702 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2703 					    cb0);
2704 		if (hret != H_SUCCESS) {
2705 			ehea_error("query_ehea_qp failed (2)");
2706 			goto out;
2707 		}
2708 
2709 		/* deregister shared memory regions */
2710 		dret = ehea_rem_smrs(pr);
2711 		if (dret) {
2712 			ehea_error("unreg shared memory region failed");
2713 			goto out;
2714 		}
2715 	}
2716 
2717 	ret = 0;
2718 out:
2719 	kfree(cb0);
2720 
2721 	return ret;
2722 }
2723 
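/* After the adapter's memory region has been re-registered, rewrite each
 * posted RQ2/RQ3 WQE with the new lkey and the remapped skb buffer address.
 */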
2724 void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2725 {
2726 	struct ehea_qp qp = *orig_qp;
2727 	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2728 	struct ehea_rwqe *rwqe;
2729 	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2730 	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2731 	struct sk_buff *skb;
2732 	u32 lkey = pr->recv_mr.lkey;
2733 
2734 
2735 	int i;
2736 	int index;
2737 
2738 	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2739 		rwqe = ehea_get_next_rwqe(&qp, 2);
2740 		rwqe->sg_list[0].l_key = lkey;
2741 		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2742 		skb = skba_rq2[index];
2743 		if (skb)
2744 			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2745 	}
2746 
2747 	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2748 		rwqe = ehea_get_next_rwqe(&qp, 3);
2749 		rwqe->sg_list[0].l_key = lkey;
2750 		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2751 		skb = skba_rq3[index];
2752 		if (skb)
2753 			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2754 	}
2755 }
2756 
2757 int ehea_restart_qps(struct net_device *dev)
2758 {
2759 	struct ehea_port *port = netdev_priv(dev);
2760 	struct ehea_adapter *adapter = port->adapter;
2761 	int ret = 0;
2762 	int i;
2763 
2764 	struct hcp_modify_qp_cb0 *cb0;
2765 	u64 hret;
2766 	u64 dummy64 = 0;
2767 	u16 dummy16 = 0;
2768 
2769 	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2770 	if (!cb0) {
2771 		ret = -ENOMEM;
2772 		goto out;
2773 	}
2774 
2775 	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2776 		struct ehea_port_res *pr =  &port->port_res[i];
2777 		struct ehea_qp *qp = pr->qp;
2778 
2779 		ret = ehea_gen_smrs(pr);
2780 		if (ret) {
2781 			ehea_error("creation of shared memory regions failed");
2782 			goto out;
2783 		}
2784 
2785 		ehea_update_rqs(qp, pr);
2786 
2787 		/* Enable queue pair */
2788 		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2789 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2790 					    cb0);
2791 		if (hret != H_SUCCESS) {
2792 			ehea_error("query_ehea_qp failed (1)");
2793 			goto out;
2794 		}
2795 
2796 		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2797 		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2798 
2799 		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2800 					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2801 							    1), cb0, &dummy64,
2802 					     &dummy64, &dummy16, &dummy16);
2803 		if (hret != H_SUCCESS) {
2804 			ehea_error("modify_ehea_qp failed (1)");
2805 			goto out;
2806 		}
2807 
2808 		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2809 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2810 					    cb0);
2811 		if (hret != H_SUCCESS) {
2812 			ehea_error("query_ehea_qp failed (2)");
2813 			goto out;
2814 		}
2815 
2816 		/* refill entire queue */
2817 		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2818 		ehea_refill_rq2(pr, 0);
2819 		ehea_refill_rq3(pr, 0);
2820 	}
2821 out:
2822 	kfree(cb0);
2823 
2824 	return ret;
2825 }
2826 
2827 static void ehea_reset_port(struct work_struct *work)
2828 {
2829 	int ret;
2830 	struct ehea_port *port =
2831 		container_of(work, struct ehea_port, reset_task);
2832 	struct net_device *dev = port->netdev;
2833 
2834 	port->resets++;
2835 	mutex_lock(&port->port_lock);
2836 	netif_stop_queue(dev);
2837 
2838 	port_napi_disable(port);
2839 
2840 	ehea_down(dev);
2841 
2842 	ret = ehea_up(dev);
2843 	if (ret)
2844 		goto out;
2845 
2846 	ehea_set_multicast_list(dev);
2847 
2848 	if (netif_msg_timer(port))
2849 		ehea_info("Device %s reset successfully", dev->name);
2850 
2851 	port_napi_enable(port);
2852 
2853 	netif_wake_queue(dev);
2854 out:
2855 	mutex_unlock(&port->port_lock);
2856 	return;
2857 }
2858 
2859 static void ehea_rereg_mrs(struct work_struct *work)
2860 {
2861 	int ret, i;
2862 	struct ehea_adapter *adapter;
2863 
2864 	mutex_lock(&dlpar_mem_lock);
2865 	ehea_info("LPAR memory changed - re-initializing driver");
2866 
2867 	list_for_each_entry(adapter, &adapter_list, list)
2868 		if (adapter->active_ports) {
2869 			/* Shutdown all ports */
2870 			for (i = 0; i < EHEA_MAX_PORTS; i++) {
2871 				struct ehea_port *port = adapter->port[i];
2872 				struct net_device *dev;
2873 
2874 				if (!port)
2875 					continue;
2876 
2877 				dev = port->netdev;
2878 
2879 				if (dev->flags & IFF_UP) {
2880 					mutex_lock(&port->port_lock);
2881 					netif_stop_queue(dev);
2882 					ehea_flush_sq(port);
2883 					ret = ehea_stop_qps(dev);
2884 					if (ret) {
2885 						mutex_unlock(&port->port_lock);
2886 						goto out;
2887 					}
2888 					port_napi_disable(port);
2889 					mutex_unlock(&port->port_lock);
2890 				}
2891 			}
2892 
2893 			/* Unregister old memory region */
2894 			ret = ehea_rem_mr(&adapter->mr);
2895 			if (ret) {
2896 				ehea_error("unregister MR failed - driver"
2897 					   " inoperable!");
2898 				goto out;
2899 			}
2900 		}
2901 
2902 	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2903 
2904 	list_for_each_entry(adapter, &adapter_list, list)
2905 		if (adapter->active_ports) {
2906 			/* Register new memory region */
2907 			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2908 			if (ret) {
2909 				ehea_error("register MR failed - driver"
2910 					   " inoperable!");
2911 				goto out;
2912 			}
2913 
2914 			/* Restart all ports */
2915 			for (i = 0; i < EHEA_MAX_PORTS; i++) {
2916 				struct ehea_port *port = adapter->port[i];
2917 
2918 				if (port) {
2919 					struct net_device *dev = port->netdev;
2920 
2921 					if (dev->flags & IFF_UP) {
2922 						mutex_lock(&port->port_lock);
2923 						port_napi_enable(port);
2924 						ret = ehea_restart_qps(dev);
2925 						if (!ret)
2926 							netif_wake_queue(dev);
2927 						mutex_unlock(&port->port_lock);
2928 					}
2929 				}
2930 			}
2931 		}
2932 	ehea_info("re-initializing driver complete");
2933 out:
2934 	mutex_unlock(&dlpar_mem_lock);
2935 	return;
2936 }
2937 
2938 static void ehea_tx_watchdog(struct net_device *dev)
2939 {
2940 	struct ehea_port *port = netdev_priv(dev);
2941 
2942 	if (netif_carrier_ok(dev) &&
2943 	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2944 		ehea_schedule_port_reset(port);
2945 }
2946 
2947 int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2948 {
2949 	struct hcp_query_ehea *cb;
2950 	u64 hret;
2951 	int ret;
2952 
2953 	cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
2954 	if (!cb) {
2955 		ret = -ENOMEM;
2956 		goto out;
2957 	}
2958 
2959 	hret = ehea_h_query_ehea(adapter->handle, cb);
2960 
2961 	if (hret != H_SUCCESS) {
2962 		ret = -EIO;
2963 		goto out_herr;
2964 	}
2965 
2966 	adapter->max_mc_mac = cb->max_mc_mac - 1;
2967 	ret = 0;
2968 
2969 out_herr:
2970 	kfree(cb);
2971 out:
2972 	return ret;
2973 }
2974 
2975 int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2976 {
2977 	struct hcp_ehea_port_cb4 *cb4;
2978 	u64 hret;
2979 	int ret = 0;
2980 
2981 	*jumbo = 0;
2982 
2983 	/* (Try to) enable jumbo frames */
2984 	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2985 	if (!cb4) {
2986 		ehea_error("no mem for cb4");
2987 		ret = -ENOMEM;
2988 		goto out;
2989 	} else {
2990 		hret = ehea_h_query_ehea_port(port->adapter->handle,
2991 					      port->logical_port_id,
2992 					      H_PORT_CB4,
2993 					      H_PORT_CB4_JUMBO, cb4);
2994 		if (hret == H_SUCCESS) {
2995 			if (cb4->jumbo_frame)
2996 				*jumbo = 1;
2997 			else {
2998 				cb4->jumbo_frame = 1;
2999 				hret = ehea_h_modify_ehea_port(port->adapter->
3000 							       handle,
3001 							       port->
3002 							       logical_port_id,
3003 							       H_PORT_CB4,
3004 							       H_PORT_CB4_JUMBO,
3005 							       cb4);
3006 				if (hret == H_SUCCESS)
3007 					*jumbo = 1;
3008 			}
3009 		} else
3010 			ret = -EINVAL;
3011 
3012 		kfree(cb4);
3013 	}
3014 out:
3015 	return ret;
3016 }
3017 
3018 static ssize_t ehea_show_port_id(struct device *dev,
3019 				 struct device_attribute *attr, char *buf)
3020 {
3021 	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
3022 	return sprintf(buf, "%d", port->logical_port_id);
3023 }
3024 
3025 static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
3026 		   NULL);
3027 
3028 static void __devinit logical_port_release(struct device *dev)
3029 {
3030 	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
3031 	of_node_put(port->ofdev.node);
3032 }
3033 
3034 static struct device *ehea_register_port(struct ehea_port *port,
3035 					 struct device_node *dn)
3036 {
3037 	int ret;
3038 
3039 	port->ofdev.node = of_node_get(dn);
3040 	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
3041 	port->ofdev.dev.bus = &ibmebus_bus_type;
3042 
3043 	sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++);
3044 	port->ofdev.dev.release = logical_port_release;
3045 
3046 	ret = of_device_register(&port->ofdev);
3047 	if (ret) {
3048 		ehea_error("failed to register device. ret=%d", ret);
3049 		goto out;
3050 	}
3051 
3052 	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
3053 	if (ret) {
3054 		ehea_error("failed to register attributes, ret=%d", ret);
3055 		goto out_unreg_of_dev;
3056 	}
3057 
3058 	return &port->ofdev.dev;
3059 
3060 out_unreg_of_dev:
3061 	of_device_unregister(&port->ofdev);
3062 out:
3063 	return NULL;
3064 }
3065 
3066 static void ehea_unregister_port(struct ehea_port *port)
3067 {
3068 	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
3069 	of_device_unregister(&port->ofdev);
3070 }
3071 
3072 struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3073 					 u32 logical_port_id,
3074 					 struct device_node *dn)
3075 {
3076 	int ret;
3077 	struct net_device *dev;
3078 	struct ehea_port *port;
3079 	struct device *port_dev;
3080 	int jumbo;
3081 
3082 	/* allocate memory for the port structures */
3083 	dev = alloc_etherdev(sizeof(struct ehea_port));
3084 
3085 	if (!dev) {
3086 		ehea_error("no mem for net_device");
3087 		ret = -ENOMEM;
3088 		goto out_err;
3089 	}
3090 
3091 	port = netdev_priv(dev);
3092 
3093 	mutex_init(&port->port_lock);
3094 	port->state = EHEA_PORT_DOWN;
3095 	port->sig_comp_iv = sq_entries / 10;
3096 
3097 	port->adapter = adapter;
3098 	port->netdev = dev;
3099 	port->logical_port_id = logical_port_id;
3100 
3101 	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
3102 
3103 	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3104 	if (!port->mc_list) {
3105 		ret = -ENOMEM;
3106 		goto out_free_ethdev;
3107 	}
3108 
3109 	INIT_LIST_HEAD(&port->mc_list->list);
3110 
3111 	ret = ehea_sense_port_attr(port);
3112 	if (ret)
3113 		goto out_free_mc_list;
3114 
3115 	port_dev = ehea_register_port(port, dn);
3116 	if (!port_dev)
3117 		goto out_free_mc_list;
3118 
3119 	SET_NETDEV_DEV(dev, port_dev);
3120 
3121 	/* initialize net_device structure */
3122 	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3123 
3124 	dev->open = ehea_open;
3125 #ifdef CONFIG_NET_POLL_CONTROLLER
3126 	dev->poll_controller = ehea_netpoll;
3127 #endif
3128 	dev->stop = ehea_stop;
3129 	dev->hard_start_xmit = ehea_start_xmit;
3130 	dev->get_stats = ehea_get_stats;
3131 	dev->set_multicast_list = ehea_set_multicast_list;
3132 	dev->set_mac_address = ehea_set_mac_addr;
3133 	dev->change_mtu = ehea_change_mtu;
3134 	dev->vlan_rx_register = ehea_vlan_rx_register;
3135 	dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
3136 	dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
3137 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3138 		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3139 		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3140 		      | NETIF_F_LLTX;
3141 	dev->tx_timeout = &ehea_tx_watchdog;
3142 	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3143 
3144 	INIT_WORK(&port->reset_task, ehea_reset_port);
3145 	ehea_set_ethtool_ops(dev);
3146 
3147 	ret = register_netdev(dev);
3148 	if (ret) {
3149 		ehea_error("register_netdev failed. ret=%d", ret);
3150 		goto out_unreg_port;
3151 	}
3152 
3153 	port->lro_max_aggr = lro_max_aggr;
3154 
3155 	ret = ehea_get_jumboframe_status(port, &jumbo);
3156 	if (ret)
3157 		ehea_error("failed determining jumbo frame status for %s",
3158 			   port->netdev->name);
3159 
3160 	ehea_info("%s: Jumbo frames are %sabled", dev->name,
3161 		  jumbo == 1 ? "en" : "dis");
3162 
3163 	adapter->active_ports++;
3164 
3165 	return port;
3166 
3167 out_unreg_port:
3168 	ehea_unregister_port(port);
3169 
3170 out_free_mc_list:
3171 	kfree(port->mc_list);
3172 
3173 out_free_ethdev:
3174 	free_netdev(dev);
3175 
3176 out_err:
3177 	ehea_error("setting up logical port with id=%d failed, ret=%d",
3178 		   logical_port_id, ret);
3179 	return NULL;
3180 }
3181 
3182 static void ehea_shutdown_single_port(struct ehea_port *port)
3183 {
3184 	struct ehea_adapter *adapter = port->adapter;
3185 	unregister_netdev(port->netdev);
3186 	ehea_unregister_port(port);
3187 	kfree(port->mc_list);
3188 	free_netdev(port->netdev);
3189 	adapter->active_ports--;
3190 }
3191 
3192 static int ehea_setup_ports(struct ehea_adapter *adapter)
3193 {
3194 	struct device_node *lhea_dn;
3195 	struct device_node *eth_dn = NULL;
3196 
3197 	const u32 *dn_log_port_id;
3198 	int i = 0;
3199 
3200 	lhea_dn = adapter->ofdev->node;
3201 	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3202 
3203 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3204 						 NULL);
3205 		if (!dn_log_port_id) {
3206 			ehea_error("bad device node: eth_dn name=%s",
3207 				   eth_dn->full_name);
3208 			continue;
3209 		}
3210 
3211 		if (ehea_add_adapter_mr(adapter)) {
3212 			ehea_error("creating MR failed");
3213 			of_node_put(eth_dn);
3214 			return -EIO;
3215 		}
3216 
3217 		adapter->port[i] = ehea_setup_single_port(adapter,
3218 							  *dn_log_port_id,
3219 							  eth_dn);
3220 		if (adapter->port[i])
3221 			ehea_info("%s -> logical port id #%d",
3222 				  adapter->port[i]->netdev->name,
3223 				  *dn_log_port_id);
3224 		else
3225 			ehea_remove_adapter_mr(adapter);
3226 
3227 		i++;
3228 	}
3229 	return 0;
3230 }
3231 
3232 static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3233 					   u32 logical_port_id)
3234 {
3235 	struct device_node *lhea_dn;
3236 	struct device_node *eth_dn = NULL;
3237 	const u32 *dn_log_port_id;
3238 
3239 	lhea_dn = adapter->ofdev->node;
3240 	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3241 
3242 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3243 						 NULL);
3244 		if (dn_log_port_id)
3245 			if (*dn_log_port_id == logical_port_id)
3246 				return eth_dn;
3247 	}
3248 
3249 	return NULL;
3250 }
3251 
3252 static ssize_t ehea_probe_port(struct device *dev,
3253 			       struct device_attribute *attr,
3254 			       const char *buf, size_t count)
3255 {
3256 	struct ehea_adapter *adapter = dev->driver_data;
3257 	struct ehea_port *port;
3258 	struct device_node *eth_dn = NULL;
3259 	int i;
3260 
3261 	u32 logical_port_id;
3262 
3263 	sscanf(buf, "%d", &logical_port_id);
3264 
3265 	port = ehea_get_port(adapter, logical_port_id);
3266 
3267 	if (port) {
3268 		ehea_info("adding port with logical port id=%d failed. port "
3269 			  "already configured as %s.", logical_port_id,
3270 			  port->netdev->name);
3271 		return -EINVAL;
3272 	}
3273 
3274 	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3275 
3276 	if (!eth_dn) {
3277 		ehea_info("no logical port with id %d found", logical_port_id);
3278 		return -EINVAL;
3279 	}
3280 
3281 	if (ehea_add_adapter_mr(adapter)) {
3282 		ehea_error("creating MR failed");
3283 		return -EIO;
3284 	}
3285 
3286 	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3287 
3288 	of_node_put(eth_dn);
3289 
3290 	if (port) {
3291 		for (i = 0; i < EHEA_MAX_PORTS; i++)
3292 			if (!adapter->port[i]) {
3293 				adapter->port[i] = port;
3294 				break;
3295 			}
3296 
3297 		ehea_info("added %s (logical port id=%d)", port->netdev->name,
3298 			  logical_port_id);
3299 	} else {
3300 		ehea_remove_adapter_mr(adapter);
3301 		return -EIO;
3302 	}
3303 
3304 	return (ssize_t) count;
3305 }
3306 
3307 static ssize_t ehea_remove_port(struct device *dev,
3308 				struct device_attribute *attr,
3309 				const char *buf, size_t count)
3310 {
3311 	struct ehea_adapter *adapter = dev->driver_data;
3312 	struct ehea_port *port;
3313 	int i;
3314 	u32 logical_port_id;
3315 
3316 	sscanf(buf, "%d", &logical_port_id);
3317 
3318 	port = ehea_get_port(adapter, logical_port_id);
3319 
3320 	if (port) {
3321 		ehea_info("removed %s (logical port id=%d)", port->netdev->name,
3322 			  logical_port_id);
3323 
3324 		ehea_shutdown_single_port(port);
3325 
3326 		for (i = 0; i < EHEA_MAX_PORTS; i++)
3327 			if (adapter->port[i] == port) {
3328 				adapter->port[i] = NULL;
3329 				break;
3330 			}
3331 	} else {
3332 		ehea_error("removing port with logical port id=%d failed. port "
3333 			   "not configured.", logical_port_id);
3334 		return -EINVAL;
3335 	}
3336 
3337 	ehea_remove_adapter_mr(adapter);
3338 
3339 	return (ssize_t) count;
3340 }
3341 
3342 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3343 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
3344 
3345 int ehea_create_device_sysfs(struct of_device *dev)
3346 {
3347 	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3348 	if (ret)
3349 		goto out;
3350 
3351 	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3352 out:
3353 	return ret;
3354 }
3355 
3356 void ehea_remove_device_sysfs(struct of_device *dev)
3357 {
3358 	device_remove_file(&dev->dev, &dev_attr_probe_port);
3359 	device_remove_file(&dev->dev, &dev_attr_remove_port);
3360 }
3361 
3362 static int __devinit ehea_probe_adapter(struct of_device *dev,
3363 					const struct of_device_id *id)
3364 {
3365 	struct ehea_adapter *adapter;
3366 	const u64 *adapter_handle;
3367 	int ret;
3368 
3369 	if (!dev || !dev->node) {
3370 		ehea_error("Invalid ibmebus device probed");
3371 		return -EINVAL;
3372 	}
3373 	mutex_lock(&ehea_fw_handles.lock);
3374 
3375 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3376 	if (!adapter) {
3377 		ret = -ENOMEM;
3378 		dev_err(&dev->dev, "no mem for ehea_adapter\n");
3379 		goto out;
3380 	}
3381 
3382 	list_add(&adapter->list, &adapter_list);
3383 
3384 	adapter->ofdev = dev;
3385 
3386 	adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
3387 					 NULL);
3388 	if (adapter_handle)
3389 		adapter->handle = *adapter_handle;
3390 
3391 	if (!adapter->handle) {
3392 		dev_err(&dev->dev, "failed getting handle for adapter"
3393 			" '%s'\n", dev->node->full_name);
3394 		ret = -ENODEV;
3395 		goto out_free_ad;
3396 	}
3397 
3398 	adapter->pd = EHEA_PD_ID;
3399 
3400 	dev->dev.driver_data = adapter;
3401 
3402 
3403 	/* initialize adapter and ports */
3404 	/* get adapter properties */
3405 	ret = ehea_sense_adapter_attr(adapter);
3406 	if (ret) {
3407 		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3408 		goto out_free_ad;
3409 	}
3410 
3411 	adapter->neq = ehea_create_eq(adapter,
3412 				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3413 	if (!adapter->neq) {
3414 		ret = -EIO;
3415 		dev_err(&dev->dev, "NEQ creation failed\n");
3416 		goto out_free_ad;
3417 	}
3418 
3419 	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3420 		     (unsigned long)adapter);
3421 
3422 	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3423 				  ehea_interrupt_neq, IRQF_DISABLED,
3424 				  "ehea_neq", adapter);
3425 	if (ret) {
3426 		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3427 		goto out_kill_eq;
3428 	}
3429 
3430 	ret = ehea_create_device_sysfs(dev);
3431 	if (ret)
3432 		goto out_free_irq;
3433 
3434 	ret = ehea_setup_ports(adapter);
3435 	if (ret) {
3436 		dev_err(&dev->dev, "setup_ports failed\n");
3437 		goto out_rem_dev_sysfs;
3438 	}
3439 
3440 	ret = 0;
3441 	goto out;
3442 
3443 out_rem_dev_sysfs:
3444 	ehea_remove_device_sysfs(dev);
3445 
3446 out_free_irq:
3447 	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3448 
3449 out_kill_eq:
3450 	ehea_destroy_eq(adapter->neq);
3451 
3452 out_free_ad:
3453 	kfree(adapter);
3454 
3455 out:
3456 	ehea_update_firmware_handles();
3457 	mutex_unlock(&ehea_fw_handles.lock);
3458 	return ret;
3459 }
3460 
3461 static int __devexit ehea_remove(struct of_device *dev)
3462 {
3463 	struct ehea_adapter *adapter = dev->dev.driver_data;
3464 	int i;
3465 
3466 	for (i = 0; i < EHEA_MAX_PORTS; i++)
3467 		if (adapter->port[i]) {
3468 			ehea_shutdown_single_port(adapter->port[i]);
3469 			adapter->port[i] = NULL;
3470 		}
3471 
3472 	ehea_remove_device_sysfs(dev);
3473 
3474 	flush_scheduled_work();
3475 
3476 	mutex_lock(&ehea_fw_handles.lock);
3477 
3478 	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3479 	tasklet_kill(&adapter->neq_tasklet);
3480 
3481 	ehea_destroy_eq(adapter->neq);
3482 	ehea_remove_adapter_mr(adapter);
3483 	list_del(&adapter->list);
3484 	kfree(adapter);
3485 
3486 	ehea_update_firmware_handles();
3487 	mutex_unlock(&ehea_fw_handles.lock);
3488 
3489 	return 0;
3490 }
3491 
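/* Crash/kexec shutdown hook: forcibly free every registered firmware
 * resource and deregister all broadcast/multicast addresses so the
 * adapter is left in a clean state for the next kernel.
 */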
3492 void ehea_crash_handler(void)
3493 {
3494 	int i;
3495 
3496 	if (ehea_fw_handles.arr)
3497 		for (i = 0; i < ehea_fw_handles.num_entries; i++)
3498 			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3499 					     ehea_fw_handles.arr[i].fwh,
3500 					     FORCE_FREE);
3501 
3502 	if (ehea_bcmc_regs.arr)
3503 		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3504 			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3505 					      ehea_bcmc_regs.arr[i].port_id,
3506 					      ehea_bcmc_regs.arr[i].reg_type,
3507 					      ehea_bcmc_regs.arr[i].macaddr,
3508 					      0, H_DEREG_BCMC);
3509 }
3510 
3511 static int ehea_mem_notifier(struct notifier_block *nb,
3512                              unsigned long action, void *data)
3513 {
3514 	struct memory_notify *arg = data;
3515 	switch (action) {
3516 	case MEM_CANCEL_OFFLINE:
3517 		ehea_info("memory offlining canceled");
3518 		/* Readd canceled memory block */
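		/* fall through */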
3519 	case MEM_ONLINE:
3520 		ehea_info("memory is going online");
3521 		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3522 			return NOTIFY_BAD;
3523 		ehea_rereg_mrs(NULL);
3524 		break;
3525 	case MEM_GOING_OFFLINE:
3526 		ehea_info("memory is going offline");
3527 		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3528 			return NOTIFY_BAD;
3529 		ehea_rereg_mrs(NULL);
3530 		break;
3531 	default:
3532 		break;
3533 	}
3534 	return NOTIFY_OK;
3535 }
3536 
3537 static struct notifier_block ehea_mem_nb = {
3538 	.notifier_call = ehea_mem_notifier,
3539 };
3540 
3541 static int ehea_reboot_notifier(struct notifier_block *nb,
3542 				unsigned long action, void *unused)
3543 {
3544 	if (action == SYS_RESTART) {
3545 		ehea_info("Reboot: freeing all eHEA resources");
3546 		ibmebus_unregister_driver(&ehea_driver);
3547 	}
3548 	return NOTIFY_DONE;
3549 }
3550 
3551 static struct notifier_block ehea_reboot_nb = {
3552 	.notifier_call = ehea_reboot_notifier,
3553 };
3554 
3555 static int check_module_parm(void)
3556 {
3557 	int ret = 0;
3558 
3559 	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3560 	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3561 		ehea_info("Bad parameter: rq1_entries");
3562 		ret = -EINVAL;
3563 	}
3564 	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3565 	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3566 		ehea_info("Bad parameter: rq2_entries");
3567 		ret = -EINVAL;
3568 	}
3569 	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3570 	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3571 		ehea_info("Bad parameter: rq3_entries");
3572 		ret = -EINVAL;
3573 	}
3574 	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3575 	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3576 		ehea_info("Bad parameter: sq_entries");
3577 		ret = -EINVAL;
3578 	}
3579 
3580 	return ret;
3581 }
3582 
3583 static ssize_t ehea_show_capabilities(struct device_driver *drv,
3584 				      char *buf)
3585 {
3586 	return sprintf(buf, "%d", EHEA_CAPABILITIES);
3587 }
3588 
3589 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3590 		   ehea_show_capabilities, NULL);
3591 
3592 int __init ehea_module_init(void)
3593 {
3594 	int ret;
3595 
3596 	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
3597 	       DRV_VERSION);
3598 
3599 
3600 	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
3601 	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3602 	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3603 
3604 	mutex_init(&ehea_fw_handles.lock);
3605 	spin_lock_init(&ehea_bcmc_regs.lock);
3606 
3607 	ret = check_module_parm();
3608 	if (ret)
3609 		goto out;
3610 
3611 	ret = ehea_create_busmap();
3612 	if (ret)
3613 		goto out;
3614 
3615 	ret = register_reboot_notifier(&ehea_reboot_nb);
3616 	if (ret)
3617 		ehea_info("failed registering reboot notifier");
3618 
3619 	ret = register_memory_notifier(&ehea_mem_nb);
3620 	if (ret)
3621 		ehea_info("failed registering memory remove notifier");
3622 
3623 	ret = crash_shutdown_register(&ehea_crash_handler);
3624 	if (ret)
3625 		ehea_info("failed registering crash handler");
3626 
3627 	ret = ibmebus_register_driver(&ehea_driver);
3628 	if (ret) {
3629 		ehea_error("failed registering eHEA device driver on ebus");
3630 		goto out2;
3631 	}
3632 
3633 	ret = driver_create_file(&ehea_driver.driver,
3634 				 &driver_attr_capabilities);
3635 	if (ret) {
3636 		ehea_error("failed to register capabilities attribute, ret=%d",
3637 			   ret);
3638 		goto out3;
3639 	}
3640 
3641 	return ret;
3642 
3643 out3:
3644 	ibmebus_unregister_driver(&ehea_driver);
3645 out2:
3646 	unregister_memory_notifier(&ehea_mem_nb);
3647 	unregister_reboot_notifier(&ehea_reboot_nb);
3648 	crash_shutdown_unregister(&ehea_crash_handler);
3649 out:
3650 	return ret;
3651 }
3652 
3653 static void __exit ehea_module_exit(void)
3654 {
3655 	int ret;
3656 
3657 	flush_scheduled_work();
3658 	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3659 	ibmebus_unregister_driver(&ehea_driver);
3660 	unregister_reboot_notifier(&ehea_reboot_nb);
3661 	ret = crash_shutdown_unregister(&ehea_crash_handler);
3662 	if (ret)
3663 		ehea_info("failed unregistering crash handler");
3664 	unregister_memory_notifier(&ehea_mem_nb);
3665 	kfree(ehea_fw_handles.arr);
3666 	kfree(ehea_bcmc_regs.arr);
3667 	ehea_destroy_busmap();
3668 }
3669 
3670 module_init(ehea_module_init);
3671 module_exit(ehea_module_exit);
3672