/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "c2.h"
#include <rdma/iw_cm.h>
#include "c2_vq.h"

static void handle_mq(struct c2_dev *c2dev, u32 index);
static void handle_vq(struct c2_dev *c2dev, u32 mq_index);

/*
 * Handle RNIC interrupts
 */
void c2_rnic_interrupt(struct c2_dev *c2dev)
{
	unsigned int mq_index;

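	/*
	 * Drain the adapter's hint FIFO.  The adapter bumps *hint_count
	 * for every hint it posts, and hints_read counts how many the
	 * host has consumed; each hint read from PCI_BAR0_HOST_HINT is
	 * the index of an MQ with new activity.  A value with the top
	 * bit set is not a valid MQ index, so stop and let the next
	 * interrupt resume the drain.
	 */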
	while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
		mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
		if (mq_index & 0x80000000) {
			break;
		}

		c2dev->hints_read++;
		handle_mq(c2dev, mq_index);
	}
}

/*
 * Top level MQ handler
 */
static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
{
	if (c2dev->qptr_array[mq_index] == NULL) {
		pr_debug("handle_mq: stray activity for mq_index=%u\n",
			 mq_index);
		return;
	}

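	/*
	 * qptr_array maps MQ indices to their backing structures:
	 * index 0 is the request VQ, index 1 is the reply VQ carrying
	 * verbs replies, index 2 is the asynchronous event queue, and
	 * any higher index is a completion queue.
	 */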
	switch (mq_index) {
	case (0):
		/*
		 * An index of 0 in the activity queue
		 * indicates the req vq now has messages
		 * available...
		 *
		 * Wake up any waiters waiting on req VQ
		 * message availability.
		 */
		wake_up(&c2dev->req_vq_wo);
		break;
	case (1):
		handle_vq(c2dev, mq_index);
		break;
	case (2):
		/* We have to purge the VQ in case there are pending
		 * accept reply requests that would result in the
		 * generation of an ESTABLISHED event. If we don't
		 * generate these first, a CLOSE event could end up
		 * being delivered before the ESTABLISHED event.
		 */
		handle_vq(c2dev, 1);

		c2_ae_event(c2dev, mq_index);
		break;
	default:
		/* There is no event synchronization between CQ events
		 * and AE or CM events. In fact, CQEs could be
		 * delivered for all of the I/O up to and including the
		 * FLUSH for a peer disconnect prior to the ESTABLISHED
		 * event being delivered to the app. The reason for this
		 * is that CM events are delivered on a thread, while AE
		 * and CQ events are delivered in interrupt context.
		 */
		c2_cq_event(c2dev, mq_index);
		break;
	}

	return;
}

/*
 * Handles verbs WR replies.
 */
static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
{
	void *adapter_msg, *reply_msg;
	struct c2wr_hdr *host_msg;
	struct c2wr_hdr tmp;
	struct c2_mq *reply_vq;
	struct c2_vq_req *req;
	struct iw_cm_event cm_event;
	int err;

	reply_vq = (struct c2_mq *) c2dev->qptr_array[mq_index];

	/*
	 * Get the next msg from mq_index into adapter_msg.
	 * Don't free it yet.
	 */
	adapter_msg = c2_mq_consume(reply_vq);
	if (adapter_msg == NULL) {
		return;
	}

	host_msg = vq_repbuf_alloc(c2dev);

	/*
	 * If we can't get a host buffer, we still wake up the
	 * waiter; we just won't hand it the message.  The waiter
	 * is expected to deal with this...
	 */
	if (!host_msg) {
		pr_debug("handle_vq: no repbufs!\n");

		/*
		 * Just copy the WR header into a local variable.
		 * This allows us to still demux on the context.
		 */
		host_msg = &tmp;
		memcpy(host_msg, adapter_msg, sizeof(tmp));
		reply_msg = NULL;
	} else {
		memcpy(host_msg, adapter_msg, reply_vq->msg_size);
		reply_msg = host_msg;
	}

	/*
	 * Free the message slot back to the MQ; adapter_msg is no
	 * longer valid after this.
	 */
	c2_mq_free(reply_vq);
162
163 /*
164 * wakeup the waiter.
165 */
166 req = (struct c2_vq_req *) (unsigned long) host_msg->context;
167 if (req == NULL) {
168 /*
169 * We should never get here, as the adapter should
170 * never send us a reply that we're not expecting.
171 */
172 if (reply_msg != NULL)
173 vq_repbuf_free(c2dev, host_msg);
174 pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
175 return;
176 }
177
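	/*
	 * err is the verb's completion status taken from the adapter's
	 * reply, or -ENOMEM if we could not allocate a host buffer to
	 * copy the full reply into.
	 */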
	if (reply_msg)
		err = c2_errno(reply_msg);
	else
		err = -ENOMEM;

	if (!err) switch (req->event) {
	case IW_CM_EVENT_ESTABLISHED:
		/*
		 * Move the QP to RTS for the established event.
		 */
		c2_set_qp_state(req->qp, C2_QP_STATE_RTS);
		/*
		 * Until ird/ord negotiation via MPAv2 support is added, send
		 * max supported values.
		 */
		cm_event.ird = cm_event.ord = 128;
		/* Fall through to deliver the CM event. */
	case IW_CM_EVENT_CLOSE:
		/*
		 * Build the CM event and hand it to the application's
		 * event handler (for both ESTABLISHED and CLOSE).
		 */
		cm_event.event = req->event;
		cm_event.status = 0;
		cm_event.local_addr = req->cm_id->local_addr;
		cm_event.remote_addr = req->cm_id->remote_addr;
		cm_event.private_data = NULL;
		cm_event.private_data_len = 0;
		req->cm_id->event_handler(req->cm_id, &cm_event);
		break;
	default:
		break;
	}

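	/*
	 * Hand the reply to the waiter and wake it; the waiter owns
	 * reply_msg from this point on.
	 */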
	req->reply_msg = (u64) (unsigned long) reply_msg;
	atomic_set(&req->reply_ready, 1);
	wake_up(&req->wait_object);

	/*
	 * If the request was cancelled, then this put will
	 * free the vq_req memory...and reply_msg!!!
	 */
	vq_req_put(c2dev, req);
}