/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/

/*! \file octeon_network.h
 *  \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */

#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE 68

struct oct_nic_stats_resp {
	u64 rh;
	struct oct_link_stats stats;
	u64 status;
};

struct oct_nic_stats_ctrl {
	struct completion complete;
	struct net_device *netdev;
};

/** LiquidIO per-interface network private data */
struct lio {
	/** State of the interface. Rx/Tx happens only in the RUNNING state. */
	atomic_t ifstate;

	/** Octeon Interface index number. This device will be represented as
	 * oct<ifidx> in the system.
	 */
	int ifidx;

	/** Octeon Input queue to use to transmit for this network interface. */
	int txq;

	/** Octeon Output queue from which pkts arrive
	 * for this network interface.
	 */
	int rxq;

	/** Guards each glist */
	spinlock_t *glist_lock;

	/** Array of gather component linked lists */
	struct list_head *glist;

	/** Pointer to the NIC properties for the Octeon device this network
	 * interface is associated with.
	 */
	struct octdev_props *octprops;

	/** Pointer to the octeon device structure. */
	struct octeon_device *oct_dev;

	struct net_device *netdev;

	/** Link information sent by the core application for this interface. */
	struct oct_link_info linfo;

	/** counter of link changes */
	u64 link_changes;

	/** Size of Tx queue for this octeon device. */
	u32 tx_qsize;

	/** Size of Rx queue for this octeon device. */
	u32 rx_qsize;

	/** MTU size for this octeon device. */
	u32 mtu;

	/** msg level flag per interface. */
	u32 msg_enable;

	/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
	u64 dev_capability;

	/* Copy of transmit encapsulation capabilities:
	 * TSO, TSO6, checksums for this device (kernel
	 * 3.10.0 onwards)
	 */
	u64 enc_dev_capability;

	/** Copy of beacon reg in phy */
	u32 phy_beacon_val;

	/** Copy of ctrl reg in phy */
	u32 led_ctrl_val;

	/* PTP clock information */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	s64 ptp_adjust;

	/* for atomic access to Octeon PTP reg and data struct */
	spinlock_t ptp_lock;

	/* Interface info */
	u32 intf_open;

	/* work queue for txq status */
	struct cavium_wq txq_status_wq;

	/* work queue for link status */
	struct cavium_wq link_status_wq;
};

#define LIO_SIZE	(sizeof(struct lio))
#define GET_LIO(netdev)	((struct lio *)netdev_priv(netdev))
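
/* Usage sketch (illustrative, not part of this header): driver paths
 * recover the per-interface state from a struct net_device with
 * GET_LIO(), typically at the top of an ndo callback.  The callback
 * name below is hypothetical.
 *
 *	static int lio_example_open(struct net_device *netdev)
 *	{
 *		struct lio *lio = GET_LIO(netdev);
 *
 *		return lio->intf_open ? 0 : -ENODEV;
 *	}
 */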

/* CIU3 watchdog CSR for core 'c': one 8-byte register per core */
#define CIU3_WDOG(c)			(0x1010000020000ULL + ((c) << 3))
#define CIU3_WDOG_MASK			12ULL
#define LIO_MONITOR_WDOG_EXPIRE		1
#define LIO_MONITOR_CORE_STUCK_MSGD	2
#define LIO_MAX_CORES			12

/**
 * \brief Enable or disable feature
 * @param netdev  pointer to network device
 * @param cmd     Command that just requires acknowledgment
 * @param param1  Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
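
/* Usage sketch (illustrative): enabling hardware LRO for IPv4 and IPv6
 * traffic.  OCTNET_CMD_LRO_ENABLE, OCTNIC_LROIPV4 and OCTNIC_LROIPV6 are
 * assumed to come from the driver's command definitions.
 *
 *	liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
 *			     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 */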

/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called from the response callback when a ctrl pkt
 * sent to the core app completes.  The nctrl_ptr contains a copy of
 * the command type and data sent to the core app.  This routine is
 * called only if the ctrl pkt was sent to the core app successfully.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
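
/* Usage sketch (illustrative): a sender would typically install this
 * routine as the completion callback on a control packet before handing
 * it to the NIC module.  The field and send-helper names below follow
 * the driver's control-packet pattern and are assumptions of this sketch.
 *
 *	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 *	octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 */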

/**
 * \brief Register ethtool operations
 * @param netdev  pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);
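
/* Usage sketch (illustrative): called once while setting up a NIC
 * interface, before the netdev is registered.
 *
 *	liquidio_set_ethtool_ops(netdev);
 *	register_netdev(netdev);
 */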

/* Align skb->data to the next 64-byte boundary (see the skb_reserve()
 * adjustment in the recv_buffer_* helpers below).
 */
#define SKB_ADJ_MASK	0x3F
#define SKB_ADJ		(SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE	256	/* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ	2048	/* half of a 4 KiB page; recv_buffer_recycle() flips between halves */

static inline void
*recv_buffer_alloc(struct octeon_device *oct,
		   struct octeon_skb_page_info *pg_info)
{
	struct page *page;
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	page = alloc_page(GFP_ATOMIC | __GFP_COLD);
	if (unlikely(!page))
		return NULL;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		__free_page(page);
		pg_info->page = NULL;
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	/* Get DMA info */
	pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
				    PAGE_SIZE, DMA_FROM_DEVICE);

	/* Mapping failed!! */
	if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
		__free_page(page);
		dev_kfree_skb_any(skb);
		pg_info->page = NULL;
		return NULL;
	}

	pg_info->page = page;
	pg_info->page_offset = 0;
	skb_pg_info->page = page;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = pg_info->dma;

	return (void *)skb;
}

static inline void
*recv_buffer_fast_alloc(u32 size)
{
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	skb = dev_alloc_skb(size + SKB_ADJ);
	if (unlikely(!skb))
		return NULL;

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = NULL;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = 0;

	return skb;
}

static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf;

	if (!pg_info->page) {
		dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
			__func__);
		return -ENOMEM;
	}

	/* Give the page up if someone else still holds a reference to it
	 * or if it sits on a remote NUMA node.
	 */
	if (unlikely(page_count(pg_info->page) != 1) ||
	    unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
		return -ENOMEM;
	}

	/* Flip to other half of the buffer */
	if (pg_info->page_offset == 0)
		pg_info->page_offset = LIO_RXBUFFER_SZ;
	else
		pg_info->page_offset = 0;
	page_ref_inc(pg_info->page);

	return 0;
}
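
/* Illustrative refill sketch (hypothetical caller, not part of this
 * header): when recv_buffer_recycle() succeeds, the other half of the
 * page can be wrapped in a fresh skb via recv_buffer_reuse() (below)
 * instead of allocating and mapping a new page.
 *
 *	if (recv_buffer_recycle(oct, pg_info) == 0)
 *		buf = recv_buffer_reuse(oct, pg_info);
 *	else
 *		buf = recv_buffer_alloc(oct, pg_info);
 */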

static inline void
*recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
	struct sk_buff *skb;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = pg_info->page;
	skb_pg_info->page_offset = pg_info->page_offset;
	skb_pg_info->dma = pg_info->dma;

	return skb;
}

static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;

	put_page(pg_info->page);
	pg_info->dma = 0;
	pg_info->page = NULL;
	pg_info->page_offset = 0;

	if (skb)
		dev_kfree_skb_any(skb);
}

static inline void recv_buffer_free(void *buffer)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));

	if (pg_info->page) {
		put_page(pg_info->page);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
	}

	dev_kfree_skb_any(skb);
}

static inline void
recv_buffer_fast_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void tx_buffer_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}

#define lio_dma_alloc(oct, size, dma_addr) \
	dma_alloc_coherent(&oct->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
	dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
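
/* Usage sketch (illustrative): allocating and freeing a coherent DMA
 * buffer for a hypothetical 4 KiB descriptor area.
 *
 *	dma_addr_t dma_addr;
 *	void *virt = lio_dma_alloc(oct, 4096, &dma_addr);
 *
 *	if (virt)
 *		lio_dma_free(oct, 4096, virt, dma_addr);
 */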

static inline
void *get_rbd(struct sk_buff *skb)
{
	struct octeon_skb_page_info *pg_info;
	unsigned char *va;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	va = page_address(pg_info->page) + pg_info->page_offset;

	return va;
}

static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
{
	dma_addr_t dma_addr;
	struct octeon_device *oct = droq->oct_dev;

	dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
				  OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);

	WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));

	return (u64)dma_addr;
}

static inline void
lio_unmap_ring_info(struct pci_dev *pci_dev,
		    u64 info_ptr, u32 size)
{
	dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
}

static inline u64
lio_map_ring(void *buf)
{
	dma_addr_t dma_addr;

	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	if (!pg_info->page) {
		pr_err("%s: pg_info->page NULL\n", __func__);
		WARN_ON(1);
	}

	/* Get DMA info */
	dma_addr = pg_info->dma;
	if (!pg_info->dma) {
		pr_err("%s: ERROR it should be already available\n",
		       __func__);
		WARN_ON(1);
	}
	dma_addr += pg_info->page_offset;

	return (u64)dma_addr;
}

static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
	       u64 buf_ptr)
{
	dma_unmap_page(&pci_dev->dev,
		       buf_ptr, (PAGE_SIZE << 0),
		       DMA_FROM_DEVICE);
}

static inline void *octeon_fast_packet_alloc(u32 size)
{
	return recv_buffer_fast_alloc(size);
}

static inline void octeon_fast_packet_next(struct octeon_droq *droq,
					    struct sk_buff *nicbuf,
					    int copy_len,
					    int idx)
{
	memcpy(skb_put(nicbuf, copy_len),
	       get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
}

#endif