// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
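/* Control BD ring (CBDR) helpers: build command descriptors and submit
 * them to the station interface's control ring to maintain the MAC
 * filter, RFS and RSS tables.
 */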

#include "enetc.h"

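/* Reclaim completed control BDs: walk from next_to_clean up to the
 * consumer index reported by hardware, warn about any command that
 * completed with a non-zero status and zero out each reclaimed
 * descriptor.
 */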
static void enetc_clean_cbdr(struct enetc_si *si)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd *dest_cbd;
	int i, status;

	i = ring->next_to_clean;

	while (enetc_rd_reg(ring->cir) != i) {
		dest_cbd = ENETC_CBD(*ring, i);
		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
		if (status)
			dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
				 status, dest_cbd->cmd);

		memset(dest_cbd, 0, sizeof(*dest_cbd));

		i = (i + 1) % ring->bd_count;
	}

	ring->next_to_clean = i;
}

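/* Number of free BDs in the ring; one slot is deliberately kept unused so
 * that a full ring can be told apart from an empty one.
 */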
static int enetc_cbd_unused(struct enetc_cbdr *r)
{
	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
		r->bd_count;
}

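/* Submit one command: copy the descriptor into the next free slot, bump
 * the producer index register so hardware starts processing, then poll
 * the consumer index until the BD is consumed or the timeout expires.
 * Returns -EBUSY on timeout and -EIO if the ring was never set up.
 */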
static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	int timeout = ENETC_CBDR_TIMEOUT;
	struct enetc_cbd *dest_cbd;
	int i;

	if (unlikely(!ring->bd_base))
		return -EIO;

	if (unlikely(!enetc_cbd_unused(ring)))
		enetc_clean_cbdr(si);

	i = ring->next_to_use;
	dest_cbd = ENETC_CBD(*ring, i);

	/* copy command to the ring */
	*dest_cbd = *cbd;
	i = (i + 1) % ring->bd_count;

	ring->next_to_use = i;
	/* let H/W know BD ring has been updated */
	enetc_wr_reg(ring->pir, i);

	do {
		if (enetc_rd_reg(ring->cir) == i)
			break;
		udelay(10); /* cannot sleep, rtnl_lock() */
		timeout -= 10;
	} while (timeout);

	if (!timeout)
		return -EBUSY;

	enetc_clean_cbdr(si);

	return 0;
}

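/* Clear the MAC filter entry at @index: send a class 1 command with all
 * options zeroed, so the enable bit that enetc_set_mac_flt_entry() sets
 * in opt[0] is not set for this entry.
 */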
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
	struct enetc_cbd cbd;

	memset(&cbd, 0, sizeof(cbd));

	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);

	return enetc_send_cmd(si, &cbd);
}

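/* Program the MAC filter entry at @index: the first four bytes of the
 * address go into addr[0] and the last two into addr[1], @si_map is
 * written to opt[3] and BIT(31) in opt[0] enables the entry.
 */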
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map)
{
	struct enetc_cbd cbd;
	u32 upper;
	u16 lower;

	memset(&cbd, 0, sizeof(cbd));

	/* fill up the "set" descriptor */
	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(si_map);
	/* enable entry */
	cbd.opt[0] = cpu_to_le32(BIT(31));

	upper = *(const u32 *)mac_addr;
	lower = *(const u16 *)(mac_addr + 4);
	cbd.addr[0] = cpu_to_le32(upper);
	cbd.addr[1] = cpu_to_le32(lower);

	return enetc_send_cmd(si, &cbd);
}

#define RFSE_ALIGN	64
/* Set entry in RFS table */
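/* The RFS entry is bounced through a coherent DMA buffer over-allocated
 * by RFSE_ALIGN bytes so that the address handed to hardware can be
 * aligned to a 64-byte boundary.
 */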
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
		       int index)
{
	struct enetc_cbd cbd = {.cmd = 0};
	dma_addr_t dma, dma_align;
	void *tmp, *tmp_align;
	int err;

	/* fill up the "set" descriptor */
	cbd.cmd = 0;
	cbd.cls = 4;
	cbd.index = cpu_to_le16(index);
	cbd.length = cpu_to_le16(sizeof(*rfse));
	cbd.opt[3] = cpu_to_le32(0); /* SI */

	tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
				 &dma, GFP_KERNEL);
	if (!tmp) {
		dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n");
		return -ENOMEM;
	}

	dma_align = ALIGN(dma, RFSE_ALIGN);
	tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
	memcpy(tmp_align, rfse, sizeof(*rfse));

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err);

	dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
			  tmp, dma);

	return err;
}

#define RSSE_ALIGN	64
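/* Read or write the RSS indirection table through a 64-byte aligned
 * coherent bounce buffer (cmd 1 writes, cmd 2 reads, class 3). Hardware
 * works on byte-sized entries, so the caller's u32 entries are narrowed
 * to u8 on write and widened back on read. @count must cover at least a
 * full 64-entry table.
 */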
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
			       bool read)
{
	struct enetc_cbd cbd = {.cmd = 0};
	dma_addr_t dma, dma_align;
	u8 *tmp, *tmp_align;
	int err, i;

	if (count < RSSE_ALIGN)
		/* HW only takes in a full 64 entry table */
		return -EINVAL;

	tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN,
				 &dma, GFP_KERNEL);
	if (!tmp) {
		dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n");
		return -ENOMEM;
	}
	dma_align = ALIGN(dma, RSSE_ALIGN);
	tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);

	if (!read)
		for (i = 0; i < count; i++)
			tmp_align[i] = (u8)(table[i]);

	/* fill up the descriptor */
	cbd.cmd = read ? 2 : 1;
	cbd.cls = 3;
	cbd.length = cpu_to_le16(count);

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err);

	if (read)
		for (i = 0; i < count; i++)
			table[i] = tmp_align[i];

	dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma);

	return err;
}

/* Get RSS table */
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	return enetc_cmd_rss_table(si, table, count, true);
}

/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}