/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"

#define CMD_POLL_TOKEN		0xffff
#define CMD_MAX_NUM		32
#define CMD_TOKEN_MASK		0x1f

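/*
 * Post a mailbox command to hardware.  hcr_mutex serializes writes to the
 * mailbox registers; "event" selects whether completion will be reported
 * through an event queue interrupt or must be polled for.
 */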
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
				      u64 out_param, u32 in_modifier,
				      u8 op_modifier, u16 op, u16 token,
				      int event)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	int ret;

	mutex_lock(&cmd->hcr_mutex);
	ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
				    op_modifier, op, token, event);
	mutex_unlock(&cmd->hcr_mutex);

	return ret;
}

/* This must be called while holding "poll_sem". */
static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
				    u64 out_param, unsigned long in_modifier,
				    u8 op_modifier, u16 op,
				    unsigned long timeout)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
					in_modifier, op_modifier, op,
					CMD_POLL_TOKEN, 0);
	if (ret) {
		dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
		return ret;
	}

	return hr_dev->hw->chk_mbox(hr_dev, timeout);
}

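/*
 * Polling-mode entry point: poll_sem allows only one polled mailbox command
 * to be outstanding at a time.
 */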
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
				  u64 out_param, unsigned long in_modifier,
				  u8 op_modifier, u16 op, unsigned long timeout)
{
	int ret;

	down(&hr_dev->cmd.poll_sem);
	ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,
				       op_modifier, op, timeout);
	up(&hr_dev->cmd.poll_sem);

	return ret;
}

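/*
 * Completion handler invoked by the hardware-specific event handling code
 * when a mailbox command posted in event mode finishes.  The token check
 * discards stale completions that belong to an earlier use of the same
 * context slot.
 */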
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param)
{
	struct hns_roce_cmd_context
		*context = &hr_dev->cmd.context[token & hr_dev->cmd.token_mask];

	if (token != context->token)
		return;

	context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
	context->out_param = out_param;
	complete(&context->done);
}

/* This must be called with event mode enabled ("use_events") while holding "event_sem". */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
				    u64 out_param, unsigned long in_modifier,
				    u8 op_modifier, u16 op,
				    unsigned long timeout)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	struct hns_roce_cmd_context *context;
	struct device *dev = hr_dev->dev;
	int ret;

	/* Grab a free context and bump its token so stale events are ignored. */
	spin_lock(&cmd->context_lock);
	WARN_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
					in_modifier, op_modifier, op,
					context->token, 1);
	if (ret)
		goto out;

	/*
	 * wait_for_completion_timeout() returns 0 on timeout; otherwise it
	 * returns the number of jiffies left before the timeout would have
	 * expired.
	 */
	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
		ret = -EBUSY;
		goto out;
	}

	ret = context->result;
	if (ret) {
		dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
		goto out;
	}

out:
	/* Return the context to the free list. */
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	return ret;
}

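/*
 * Event-mode entry point: event_sem limits the number of outstanding
 * commands to the number of available contexts (max_cmds).
 */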
static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
				  u64 out_param, unsigned long in_modifier,
				  u8 op_modifier, u16 op, unsigned long timeout)
{
	int ret;

	down(&hr_dev->cmd.event_sem);
	ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
				       in_modifier, op_modifier, op, timeout);
	up(&hr_dev->cmd.event_sem);

	return ret;
}

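/*
 * Issue a mailbox command and wait for its completion, using event mode if
 * it has been enabled and polling otherwise.  If the hardware provides a
 * reset-state hook (rst_prc_mbox), a command issued while the device is
 * being reset is either treated as already done or rejected with -EBUSY.
 */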
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
		      unsigned long in_modifier, u8 op_modifier, u16 op,
		      unsigned long timeout)
{
	int ret;

	if (hr_dev->hw->rst_prc_mbox) {
		ret = hr_dev->hw->rst_prc_mbox(hr_dev);
		if (ret == CMD_RST_PRC_SUCCESS)
			return 0;
		else if (ret == CMD_RST_PRC_EBUSY)
			return -EBUSY;
	}

	if (hr_dev->cmd.use_events)
		ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
					     in_modifier, op_modifier, op,
					     timeout);
	else
		ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
					     in_modifier, op_modifier, op,
					     timeout);

	if (ret == CMD_RST_PRC_EBUSY)
		return -EBUSY;

	if (ret && (hr_dev->hw->rst_prc_mbox &&
		    hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS))
		return 0;

	return ret;
}

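/*
 * Initialize the command interface in polling mode and create the DMA pool
 * that backs mailbox buffers.
 */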
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;

	mutex_init(&hr_dev->cmd.hcr_mutex);
	sema_init(&hr_dev->cmd.poll_sem, 1);
	hr_dev->cmd.use_events = 0;
	hr_dev->cmd.max_cmds = CMD_MAX_NUM;
	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
					   HNS_ROCE_MAILBOX_SIZE,
					   HNS_ROCE_MAILBOX_SIZE, 0);
	if (!hr_dev->cmd.pool)
		return -ENOMEM;

	return 0;
}

void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev)
{
	dma_pool_destroy(hr_dev->cmd.pool);
}

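/*
 * Switch the command interface to event (interrupt-driven) mode: allocate
 * the per-command contexts, link them into a free list and arm event_sem.
 */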
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
	int i;

	hr_cmd->context = kmalloc_array(hr_cmd->max_cmds,
					sizeof(*hr_cmd->context),
					GFP_KERNEL);
	if (!hr_cmd->context)
		return -ENOMEM;

	for (i = 0; i < hr_cmd->max_cmds; ++i) {
		hr_cmd->context[i].token = i;
		hr_cmd->context[i].next = i + 1;
	}

	hr_cmd->context[hr_cmd->max_cmds - 1].next = -1;
	hr_cmd->free_head = 0;

	sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
	spin_lock_init(&hr_cmd->context_lock);

	hr_cmd->token_mask = CMD_TOKEN_MASK;
	hr_cmd->use_events = 1;

	return 0;
}

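/* Switch back to polling mode and release the event-mode contexts. */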
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;

	kfree(hr_cmd->context);
	hr_cmd->use_events = 0;
}

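/*
 * Allocate a mailbox buffer from the command DMA pool.  mailbox->buf is the
 * CPU address and mailbox->dma the bus address handed to hardware.
 */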
struct hns_roce_cmd_mailbox
	*hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}

void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
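
/*
 * Illustrative sketch of a typical caller (not part of this file): allocate
 * a mailbox, fill it, pass its bus address as the command's input parameter
 * and free it afterwards.  "opcode", "obj_index" and "timeout_msecs" are
 * placeholders for whatever the real caller supplies.
 *
 *	struct hns_roce_cmd_mailbox *mailbox;
 *	int ret;
 *
 *	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *
 *	... fill mailbox->buf with the context destined for hardware ...
 *
 *	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, obj_index, 0,
 *				opcode, timeout_msecs);
 *	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 *	return ret;
 */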