/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _CHELSIO_L2T_H
#define _CHELSIO_L2T_H

#include <linux/spinlock.h>
#include "t3cdev.h"
#include <linux/atomic.h>

enum {
	L2T_STATE_VALID,	/* entry is up to date */
	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
	L2T_STATE_RESOLVING,	/* entry needs address resolution */
	L2T_STATE_UNUSED	/* entry not in use */
};

struct neighbour;
struct sk_buff;
/*
 * Each L2T entry plays multiple roles.  First of all, it keeps state for the
 * corresponding entry of the HW L2 table and maintains a queue of offload
 * packets awaiting address resolution.  Second, it is a node of a hash table
 * chain, where the nodes of the chain are linked together through their next
 * pointer.  Finally, each node is a bucket of a hash table, pointing to the
 * first element in its chain through its first pointer.
 */
struct l2t_entry {
	u16 state;			/* entry state */
	u16 idx;			/* entry index */
	u32 addr;			/* dest IP address */
	int ifindex;			/* neighbour's net_device's ifindex */
	u16 smt_idx;			/* SMT index */
	u16 vlan;			/* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
	struct neighbour *neigh;	/* associated neighbour */
	struct l2t_entry *first;	/* start of hash chain */
	struct l2t_entry *next;		/* next l2t_entry on chain */
	struct sk_buff_head arpq;	/* queue of packets awaiting resolution */
	spinlock_t lock;
	atomic_t refcnt;		/* entry reference count */
	u8 dmac[6];			/* neighbour's MAC address */
};

struct l2t_data {
	unsigned int nentries;		/* number of entries */
	struct l2t_entry *rover;	/* starting point for next allocation */
	atomic_t nfree;			/* number of free entries */
	rwlock_t lock;
	struct rcu_head rcu_head;	/* to handle rcu cleanup */
	struct l2t_entry l2tab[];
};
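
/*
 * Illustrative sketch only (not part of this header): a lookup hashes the
 * destination address to a bucket in l2tab[], then walks the chain rooted
 * at that bucket's 'first' pointer and follows 'next'.  The hash function
 * name below is hypothetical; the real lookup logic lives in l2t.c.
 *
 *	struct l2t_entry *e;
 *
 *	read_lock_bh(&d->lock);
 *	for (e = d->l2tab[example_hash(addr)].first; e; e = e->next)
 *		if (e->addr == addr && e->ifindex == ifindex)
 *			break;			// found a matching entry
 *	read_unlock_bh(&d->lock);
 */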

typedef void (*arp_failure_handler_func)(struct t3cdev *dev,
					 struct sk_buff *skb);

/*
 * Callback stored in an skb to handle address resolution failure.
 */
struct l2t_skb_cb {
	arp_failure_handler_func arp_failure_handler;
};

#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)

static inline void set_arp_failure_handler(struct sk_buff *skb,
					   arp_failure_handler_func hnd)
{
	L2T_SKB_CB(skb)->arp_failure_handler = hnd;
}
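
/*
 * Illustrative sketch only: an offload path would typically install a
 * failure callback before queueing an skb behind address resolution, so the
 * packet can be dropped (or retried) if ARP never completes.  The callback
 * name below is hypothetical.
 *
 *	static void example_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 *	{
 *		kfree_skb(skb);		// give up on this offload packet
 *	}
 *	...
 *	set_arp_failure_handler(skb, example_arp_failure);
 */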

/*
 * Getting to the L2 data from an offload device.
 */
#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))

#define W_TCB_L2T_IX    0
#define S_TCB_L2T_IX    7
#define M_TCB_L2T_IX    0x7ffULL
#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
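
/*
 * Illustrative sketch only: when a connection's TCB is programmed, the L2T
 * entry index is masked to the field width and shifted into position with
 * the macros above.  The variable names below are hypothetical.
 *
 *	u64 val  = V_TCB_L2T_IX(e->idx & M_TCB_L2T_IX);
 *	u64 mask = V_TCB_L2T_IX(M_TCB_L2T_IX);
 *	// write val/mask at TCB word W_TCB_L2T_IX
 */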

void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
			     struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
		     struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
void t3_free_l2t(struct l2t_data *d);

int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);

static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
			   struct l2t_entry *e)
{
	if (likely(e->state == L2T_STATE_VALID))
		return cxgb3_ofld_send(dev, skb);
	return t3_l2t_send_slow(dev, skb, e);
}
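
/*
 * Illustrative sketch only: a typical offload transmit path resolves the
 * destination to an L2T entry once, then sends through it.  The variable
 * names below are hypothetical and error handling is elided.
 *
 *	struct l2t_entry *e;
 *
 *	e = t3_l2t_get(cdev, dst, netdev, daddr);
 *	if (!e)
 *		return -ENOMEM;
 *	set_arp_failure_handler(skb, example_arp_failure);
 *	l2t_send(cdev, skb, e);		// fast path if the entry is VALID
 *	l2t_release(cdev, e);
 */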

static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
{
	struct l2t_data *d;

	rcu_read_lock();
	d = L2DATA(t);

	if (atomic_dec_and_test(&e->refcnt) && d)
		t3_l2e_free(d, e);

	rcu_read_unlock();
}

static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (d && atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */
		atomic_dec(&d->nfree);
}
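
/*
 * Illustrative sketch only: code that caches an L2T entry pointer beyond the
 * current call should take an extra reference with l2t_hold() and drop it
 * with l2t_release() when done, e.g.:
 *
 *	rcu_read_lock();		// L2DATA() requires an RCU read-side
 *	l2t_hold(L2DATA(cdev), e);	// critical section
 *	rcu_read_unlock();
 *	...
 *	l2t_release(cdev, e);
 */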

#endif