• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2 /*
3  * Copyright(c) 2020 Intel Corporation.
4  *
5  */
6 
7 /*
8  * This file contains HFI1 support for IPOIB functionality
9  */
10 
11 #ifndef HFI1_IPOIB_H
12 #define HFI1_IPOIB_H
13 
14 #include <linux/types.h>
15 #include <linux/stddef.h>
16 #include <linux/atomic.h>
17 #include <linux/netdevice.h>
18 #include <linux/slab.h>
19 #include <linux/skbuff.h>
20 #include <linux/list.h>
21 #include <linux/if_infiniband.h>
22 
23 #include "hfi.h"
24 #include "iowait.h"
25 #include "netdev.h"
26 
27 #include <rdma/ib_verbs.h>
28 
29 #define HFI1_IPOIB_ENTROPY_SHIFT   24
30 
31 #define HFI1_IPOIB_TXREQ_NAME_LEN   32
32 
33 #define HFI1_IPOIB_PSEUDO_LEN 20
34 #define HFI1_IPOIB_ENCAP_LEN 4
35 
36 struct hfi1_ipoib_dev_priv;
37 
/*
 * Flow identifier for an IPoIB tx: pairs the tx queue index with the
 * SC5 (service channel) value.  The union lets the pair be compared or
 * copied as a single u16.
 */
union hfi1_ipoib_flow {
	u16 as_int;
	struct {
		u8 tx_queue;	/* ipoib tx queue index */
		u8 sc5;		/* service channel */
	} __attribute__((__packed__));
};
45 
/**
 * struct hfi1_ipoib_circ_buf - List of items to be processed
 * @items: ring of items (struct ipoib_txreq pointers)
 * @head: ring head - index advanced by the producer
 * @tail: ring tail - index advanced by the consumer
 * @max_items: max items + 1 that the ring can contain
 * @producer_lock: producer sync lock
 * @consumer_lock: consumer sync lock
 */
struct ipoib_txreq;
struct hfi1_ipoib_circ_buf {
	struct ipoib_txreq **items;
	unsigned long head;
	unsigned long tail;
	unsigned long max_items;
	spinlock_t producer_lock; /* head sync lock */
	spinlock_t consumer_lock; /* tail sync lock */
};
64 
/**
 * struct hfi1_ipoib_txq - IPOIB per Tx queue information
 * @priv: private pointer
 * @sde: sdma engine
 * @tx_list: tx request list
 * @sent_txreqs: count of txreqs posted to sdma
 * @stops: count of stops of queue
 * @ring_full: ring has been filled
 * @no_desc: descriptor shortage seen
 * @flow: tracks when list needs to be flushed for a flow change
 * @q_idx: ipoib Tx queue index
 * @pkts_sent: indicator packets have been sent from this queue
 * @wait: iowait structure
 * @complete_txreqs: count of txreqs completed by sdma
 * @napi: pointer to tx napi interface
 * @tx_ring: ring of ipoib txreqs to be reaped by napi callback
 */
struct hfi1_ipoib_txq {
	struct hfi1_ipoib_dev_priv *priv;
	struct sdma_engine *sde;
	struct list_head tx_list;
	u64 sent_txreqs;
	atomic_t stops;
	atomic_t ring_full;
	atomic_t no_desc;
	union hfi1_ipoib_flow flow;
	u8 q_idx;
	bool pkts_sent;
	struct iowait wait;

	/* kept on its own cacheline: written by the sdma completion path */
	atomic64_t ____cacheline_aligned_in_smp complete_txreqs;
	struct napi_struct *napi;
	struct hfi1_ipoib_circ_buf tx_ring;
};
99 
/**
 * struct hfi1_ipoib_dev_priv - hfi1 per-netdev IPoIB private data
 * @dd: hfi1 device data
 * @netdev: associated net device
 * @device: associated ib device
 * @txqs: array of per-queue tx state (one per tx queue)
 * @txreq_cache: kmem cache for tx requests
 * @tx_napis: array of tx napi contexts
 * @pkey: IB partition key
 * @pkey_index: index of @pkey in the local pkey table
 * @qkey: IB queue key
 * @port_num: IB port number
 * @netdev_ops: saved net_device_ops (presumably the rdma_netdev defaults
 *              being wrapped - confirm against the .c file)
 * @qp: rvt queue pair used by this interface
 * @netstats: per-cpu sw net statistics
 */
struct hfi1_ipoib_dev_priv {
	struct hfi1_devdata *dd;
	struct net_device   *netdev;
	struct ib_device    *device;
	struct hfi1_ipoib_txq *txqs;
	struct kmem_cache *txreq_cache;
	struct napi_struct *tx_napis;
	u16 pkey;
	u16 pkey_index;
	u32 qkey;
	u8 port_num;

	const struct net_device_ops *netdev_ops;
	struct rvt_qp *qp;
	struct pcpu_sw_netstats __percpu *netstats;
};
116 
/*
 * hfi1 ipoib rdma netdev's private data structure.
 * Lives in netdev_priv(); hfi1_ipoib_priv() relies on @rn being first
 * so the whole structure can be recovered from the net_device.
 */
struct hfi1_ipoib_rdma_netdev {
	struct rdma_netdev rn;  /* keep this first */
	/* followed by device private data */
	struct hfi1_ipoib_dev_priv dev_priv;
};
123 
124 static inline struct hfi1_ipoib_dev_priv *
hfi1_ipoib_priv(const struct net_device * dev)125 hfi1_ipoib_priv(const struct net_device *dev)
126 {
127 	return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
128 }
129 
130 static inline void
hfi1_ipoib_update_rx_netstats(struct hfi1_ipoib_dev_priv * priv,u64 packets,u64 bytes)131 hfi1_ipoib_update_rx_netstats(struct hfi1_ipoib_dev_priv *priv,
132 			      u64 packets,
133 			      u64 bytes)
134 {
135 	struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
136 
137 	u64_stats_update_begin(&netstats->syncp);
138 	netstats->rx_packets += packets;
139 	netstats->rx_bytes += bytes;
140 	u64_stats_update_end(&netstats->syncp);
141 }
142 
143 static inline void
hfi1_ipoib_update_tx_netstats(struct hfi1_ipoib_dev_priv * priv,u64 packets,u64 bytes)144 hfi1_ipoib_update_tx_netstats(struct hfi1_ipoib_dev_priv *priv,
145 			      u64 packets,
146 			      u64 bytes)
147 {
148 	struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
149 
150 	u64_stats_update_begin(&netstats->syncp);
151 	netstats->tx_packets += packets;
152 	netstats->tx_bytes += bytes;
153 	u64_stats_update_end(&netstats->syncp);
154 }
155 
/* Transmit an skb to @dqpn via sdma; entry point for the tx path. */
int hfi1_ipoib_send_dma(struct net_device *dev,
			struct sk_buff *skb,
			struct ib_ah *address,
			u32 dqpn);

/* Tx queue setup / teardown. */
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);

/* Rx queue setup / teardown. */
int hfi1_ipoib_rxq_init(struct net_device *dev);
void hfi1_ipoib_rxq_deinit(struct net_device *dev);

/* Enable/disable tx napi polling. */
void hfi1_ipoib_napi_tx_enable(struct net_device *dev);
void hfi1_ipoib_napi_tx_disable(struct net_device *dev);

/* Build an skb of @size around @data received on @rxq. */
struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
				       int size, void *data);

/* rdma_netdev alloc-params callback for RDMA_NETDEV_IPOIB. */
int hfi1_ipoib_rn_get_params(struct ib_device *device,
			     u8 port_num,
			     enum rdma_netdev_t type,
			     struct rdma_netdev_alloc_params *params);
177 
#endif /* HFI1_IPOIB_H */