1 /*
2 * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19 FILE_LICENCE ( GPL2_OR_LATER );
20
21 #include <stdint.h>
22 #include <stdlib.h>
23 #include <errno.h>
24 #include <unistd.h>
25 #include <assert.h>
26 #include <gpxe/io.h>
27 #include <gpxe/pci.h>
28 #include <gpxe/infiniband.h>
29 #include <gpxe/i2c.h>
30 #include <gpxe/bitbash.h>
31 #include <gpxe/malloc.h>
32 #include <gpxe/iobuf.h>
33 #include "linda.h"
34
35 /**
36 * @file
37 *
38 * QLogic Linda Infiniband HCA
39 *
40 */
41
42 /** A Linda send work queue */
43 struct linda_send_work_queue {
44 /** Send buffer usage */
45 uint8_t *send_buf;
46 /** Producer index */
47 unsigned int prod;
48 /** Consumer index */
49 unsigned int cons;
50 };
51
52 /** A Linda receive work queue */
53 struct linda_recv_work_queue {
54 /** Receive header ring */
55 void *header;
56 /** Receive header producer offset (written by hardware) */
57 struct QIB_7220_scalar header_prod;
58 /** Receive header consumer offset */
59 unsigned int header_cons;
60 /** Offset within register space of the eager array */
61 unsigned long eager_array;
62 /** Number of entries in eager array */
63 unsigned int eager_entries;
64 /** Eager array producer index */
65 unsigned int eager_prod;
66 /** Eager array consumer index */
67 unsigned int eager_cons;
68 };
69
70 /** A Linda HCA */
71 struct linda {
72 /** Registers */
73 void *regs;
74
75 /** In-use contexts */
76 uint8_t used_ctx[LINDA_NUM_CONTEXTS];
77 /** Send work queues */
78 struct linda_send_work_queue send_wq[LINDA_NUM_CONTEXTS];
79 /** Receive work queues */
80 struct linda_recv_work_queue recv_wq[LINDA_NUM_CONTEXTS];
81
82 /** Offset within register space of the first send buffer */
83 unsigned long send_buffer_base;
84 /** Send buffer availability (reported by hardware) */
85 struct QIB_7220_SendBufAvail *sendbufavail;
86 /** Send buffer availability (maintained by software) */
87 uint8_t send_buf[LINDA_MAX_SEND_BUFS];
88 /** Send buffer availability producer counter */
89 unsigned int send_buf_prod;
90 /** Send buffer availability consumer counter */
91 unsigned int send_buf_cons;
92 /** Number of reserved send buffers (across all QPs) */
93 unsigned int reserved_send_bufs;
94
95 /** I2C bit-bashing interface */
96 struct i2c_bit_basher i2c;
97 /** I2C serial EEPROM */
98 struct i2c_device eeprom;
99 };
100
101 /***************************************************************************
102 *
103 * Linda register access
104 *
105 ***************************************************************************
106 *
107 * This card requires atomic 64-bit accesses. Strange things happen
108 * if you try to use 32-bit accesses; sometimes they work, sometimes
109 * they don't, sometimes you get random data.
110 *
111 * These accessors use the "movq" MMX instruction, and so won't work
112 * on really old Pentiums (which won't have PCIe anyway, so this is
113 * something of a moot point).
114 */
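
/* Typical usage (illustrative): registers are read and written through
 * the linda_readq()/linda_writeq() macros defined below, using the
 * bit-structured register types from linda.h.  For example:
 *
 *	struct QIB_7220_IBCStatus ibcstatus;
 *	linda_readq ( linda, &ibcstatus, QIB_7220_IBCStatus_offset );
 *	link_state = BIT_GET ( &ibcstatus, LinkState );
 */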
115
116 /**
117 * Read Linda qword register
118 *
119 * @v linda Linda device
120 * @v dwords Register buffer to read into
121 * @v offset Register offset
122 */
123 static void linda_readq ( struct linda *linda, uint32_t *dwords,
124 unsigned long offset ) {
125 void *addr = ( linda->regs + offset );
126
127 __asm__ __volatile__ ( "movq (%1), %%mm0\n\t"
128 "movq %%mm0, (%0)\n\t"
129 : : "r" ( dwords ), "r" ( addr ) : "memory" );
130
131 DBGIO ( "[%08lx] => %08x%08x\n",
132 virt_to_phys ( addr ), dwords[1], dwords[0] );
133 }
134 #define linda_readq( _linda, _ptr, _offset ) \
135 linda_readq ( (_linda), (_ptr)->u.dwords, (_offset) )
136 #define linda_readq_array8b( _linda, _ptr, _offset, _idx ) \
137 linda_readq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
138 #define linda_readq_array64k( _linda, _ptr, _offset, _idx ) \
139 linda_readq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ) )
140
141 /**
142 * Write Linda qword register
143 *
144 * @v linda Linda device
145 * @v dwords Register buffer to write
146 * @v offset Register offset
147 */
148 static void linda_writeq ( struct linda *linda, const uint32_t *dwords,
149 unsigned long offset ) {
150 void *addr = ( linda->regs + offset );
151
152 DBGIO ( "[%08lx] <= %08x%08x\n",
153 virt_to_phys ( addr ), dwords[1], dwords[0] );
154
155 __asm__ __volatile__ ( "movq (%0), %%mm0\n\t"
156 "movq %%mm0, (%1)\n\t"
157 : : "r" ( dwords ), "r" ( addr ) : "memory" );
158 }
159 #define linda_writeq( _linda, _ptr, _offset ) \
160 linda_writeq ( (_linda), (_ptr)->u.dwords, (_offset) )
161 #define linda_writeq_array8b( _linda, _ptr, _offset, _idx ) \
162 linda_writeq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
163 #define linda_writeq_array64k( _linda, _ptr, _offset, _idx ) \
164 linda_writeq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ) )
165
166 /**
167 * Write Linda dword register
168 *
169 * @v linda Linda device
170 * @v dword Value to write
171 * @v offset Register offset
172 */
173 static void linda_writel ( struct linda *linda, uint32_t dword,
174 unsigned long offset ) {
175 writel ( dword, ( linda->regs + offset ) );
176 }
177
178 /***************************************************************************
179 *
180 * Link state management
181 *
182 ***************************************************************************
183 */
184
185 /**
186 * Textual representation of link state
187 *
188 * @v link_state Link state
189 * @ret link_text Link state text
190 */
191 static const char * linda_link_state_text ( unsigned int link_state ) {
192 switch ( link_state ) {
193 case LINDA_LINK_STATE_DOWN: return "DOWN";
194 case LINDA_LINK_STATE_INIT: return "INIT";
195 case LINDA_LINK_STATE_ARM: return "ARM";
196 case LINDA_LINK_STATE_ACTIVE: return "ACTIVE";
197 case LINDA_LINK_STATE_ACT_DEFER:return "ACT_DEFER";
198 default: return "UNKNOWN";
199 }
200 }
201
202 /**
203 * Handle link state change
204 *
205 * @v ibdev Infiniband device
206 */
207 static void linda_link_state_changed ( struct ib_device *ibdev ) {
208 struct linda *linda = ib_get_drvdata ( ibdev );
209 struct QIB_7220_IBCStatus ibcstatus;
210 struct QIB_7220_EXTCtrl extctrl;
211 unsigned int link_state;
212 unsigned int link_width;
213 unsigned int link_speed;
214
215 /* Read link state */
216 linda_readq ( linda, &ibcstatus, QIB_7220_IBCStatus_offset );
217 link_state = BIT_GET ( &ibcstatus, LinkState );
218 link_width = BIT_GET ( &ibcstatus, LinkWidthActive );
219 link_speed = BIT_GET ( &ibcstatus, LinkSpeedActive );
220 DBGC ( linda, "Linda %p link state %s (%s %s)\n", linda,
221 linda_link_state_text ( link_state ),
222 ( link_speed ? "DDR" : "SDR" ), ( link_width ? "x4" : "x1" ) );
223
224 /* Set LEDs according to link state */
225 linda_readq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
226 BIT_SET ( &extctrl, LEDPriPortGreenOn,
227 ( ( link_state >= LINDA_LINK_STATE_INIT ) ? 1 : 0 ) );
228 BIT_SET ( &extctrl, LEDPriPortYellowOn,
229 ( ( link_state >= LINDA_LINK_STATE_ACTIVE ) ? 1 : 0 ) );
230 linda_writeq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
231
232 /* Notify Infiniband core of link state change */
233 ibdev->port_state = ( link_state + 1 );
234 ibdev->link_width_active =
235 ( link_width ? IB_LINK_WIDTH_4X : IB_LINK_WIDTH_1X );
236 ibdev->link_speed_active =
237 ( link_speed ? IB_LINK_SPEED_DDR : IB_LINK_SPEED_SDR );
238 ib_link_state_changed ( ibdev );
239 }
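
/* Note: the hardware link state values (DOWN, INIT, ARM, ACTIVE) are
 * assumed to map onto the Infiniband port state encoding used by the
 * gPXE core simply by adding one, which is what the "link_state + 1"
 * assignment above relies on.
 */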
240
241 /**
242 * Wait for link state change to take effect
243 *
244 * @v linda Linda device
245 * @v new_link_state Expected link state
246 * @ret rc Return status code
247 */
248 static int linda_link_state_check ( struct linda *linda,
249 unsigned int new_link_state ) {
250 struct QIB_7220_IBCStatus ibcstatus;
251 unsigned int link_state;
252 unsigned int i;
253
254 for ( i = 0 ; i < LINDA_LINK_STATE_MAX_WAIT_US ; i++ ) {
255 linda_readq ( linda, &ibcstatus, QIB_7220_IBCStatus_offset );
256 link_state = BIT_GET ( &ibcstatus, LinkState );
257 if ( link_state == new_link_state )
258 return 0;
259 udelay ( 1 );
260 }
261
262 DBGC ( linda, "Linda %p timed out waiting for link state %s\n",
263 linda, linda_link_state_text ( link_state ) );
264 return -ETIMEDOUT;
265 }
266
267 /**
268 * Set port information
269 *
270 * @v ibdev Infiniband device
271 * @v mad Set port information MAD
272 */
273 static int linda_set_port_info ( struct ib_device *ibdev, union ib_mad *mad ) {
274 struct linda *linda = ib_get_drvdata ( ibdev );
275 struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
276 struct QIB_7220_IBCCtrl ibcctrl;
277 unsigned int port_state;
278 unsigned int link_state;
279
280 /* Set new link state */
281 port_state = ( port_info->link_speed_supported__port_state & 0xf );
282 if ( port_state ) {
283 link_state = ( port_state - 1 );
284 DBGC ( linda, "Linda %p set link state to %s (%x)\n", linda,
285 linda_link_state_text ( link_state ), link_state );
286 linda_readq ( linda, &ibcctrl, QIB_7220_IBCCtrl_offset );
287 BIT_SET ( &ibcctrl, LinkCmd, link_state );
288 linda_writeq ( linda, &ibcctrl, QIB_7220_IBCCtrl_offset );
289
290 /* Wait for link state change to take effect. Ignore
291 * errors; the current link state will be returned via
292 * the GetResponse MAD.
293 */
294 linda_link_state_check ( linda, link_state );
295 }
296
297 /* Detect and report link state change */
298 linda_link_state_changed ( ibdev );
299
300 return 0;
301 }
302
303 /**
304 * Set partition key table
305 *
306 * @v ibdev Infiniband device
307 * @v mad Set partition key table MAD
308 */
309 static int linda_set_pkey_table ( struct ib_device *ibdev __unused,
310 union ib_mad *mad __unused ) {
311 /* Nothing to do */
312 return 0;
313 }
314
315 /***************************************************************************
316 *
317 * Context allocation
318 *
319 ***************************************************************************
320 */
321
322 /**
323 * Map context number to QPN
324 *
325 * @v ctx Context index
326 * @ret qpn Queue pair number
327 */
328 static int linda_ctx_to_qpn ( unsigned int ctx ) {
329 /* This mapping is fixed by hardware */
330 return ( ctx * 2 );
331 }
332
333 /**
334 * Map QPN to context number
335 *
336 * @v qpn Queue pair number
337 * @ret ctx Context index
338 */
339 static int linda_qpn_to_ctx ( unsigned int qpn ) {
340 /* This mapping is fixed by hardware */
341 return ( qpn / 2 );
342 }
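
/* For example, with this fixed mapping context 0 corresponds to QPN 0,
 * context 1 to QPN 2, context 2 to QPN 4, and so on; odd QPNs are
 * never handed out by this driver.
 */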
343
344 /**
345 * Allocate a context
346 *
347 * @v linda Linda device
348 * @ret ctx Context index, or negative error
349 */
350 static int linda_alloc_ctx ( struct linda *linda ) {
351 unsigned int ctx;
352
353 for ( ctx = 0 ; ctx < LINDA_NUM_CONTEXTS ; ctx++ ) {
354
355 if ( ! linda->used_ctx[ctx] ) {
356 linda->used_ctx[ctx] = 1;
357 DBGC2 ( linda, "Linda %p CTX %d allocated\n",
358 linda, ctx );
359 return ctx;
360 }
361 }
362
363 DBGC ( linda, "Linda %p out of available contexts\n", linda );
364 return -ENOENT;
365 }
366
367 /**
368 * Free a context
369 *
370 * @v linda Linda device
371 * @v ctx Context index
372 */
373 static void linda_free_ctx ( struct linda *linda, unsigned int ctx ) {
374
375 linda->used_ctx[ctx] = 0;
376 DBGC2 ( linda, "Linda %p CTX %d freed\n", linda, ctx );
377 }
378
379 /***************************************************************************
380 *
381 * Send datapath
382 *
383 ***************************************************************************
384 */
385
386 /** Send buffer toggle bit
387 *
388 * We encode send buffers as 7 bits of send buffer index plus a single
389 * bit which should match the "check" bit in the SendBufAvail array.
390 */
391 #define LINDA_SEND_BUF_TOGGLE 0x80
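
/* For example, an encoded value of 0x85 refers to send buffer index 5
 * with an expected "check" bit of 1; XORing with LINDA_SEND_BUF_TOGGLE
 * on each allocation flips the expected check bit for that buffer.
 */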
392
393 /**
394 * Allocate a send buffer
395 *
396 * @v linda Linda device
397 * @ret send_buf Send buffer
398 *
399 * You must guarantee that a send buffer is available. This is done
400 * by refusing to allocate more TX WQEs in total than the number of
401 * available send buffers.
402 */
403 static unsigned int linda_alloc_send_buf ( struct linda *linda ) {
404 unsigned int send_buf;
405
406 send_buf = linda->send_buf[linda->send_buf_cons];
407 send_buf ^= LINDA_SEND_BUF_TOGGLE;
408 linda->send_buf_cons = ( ( linda->send_buf_cons + 1 ) %
409 LINDA_MAX_SEND_BUFS );
410 return send_buf;
411 }
412
413 /**
414 * Free a send buffer
415 *
416 * @v linda Linda device
417 * @v send_buf Send buffer
418 */
419 static void linda_free_send_buf ( struct linda *linda,
420 unsigned int send_buf ) {
421 linda->send_buf[linda->send_buf_prod] = send_buf;
422 linda->send_buf_prod = ( ( linda->send_buf_prod + 1 ) %
423 LINDA_MAX_SEND_BUFS );
424 }
425
426 /**
427 * Check to see if send buffer is in use
428 *
429 * @v linda Linda device
430 * @v send_buf Send buffer
431 * @ret in_use Send buffer is in use
432 */
433 static int linda_send_buf_in_use ( struct linda *linda,
434 unsigned int send_buf ) {
435 unsigned int send_idx;
436 unsigned int send_check;
437 unsigned int inusecheck;
438 unsigned int inuse;
439 unsigned int check;
440
441 send_idx = ( send_buf & ~LINDA_SEND_BUF_TOGGLE );
442 send_check = ( !! ( send_buf & LINDA_SEND_BUF_TOGGLE ) );
443 inusecheck = BIT_GET ( linda->sendbufavail, InUseCheck[send_idx] );
444 inuse = ( !! ( inusecheck & 0x02 ) );
445 check = ( !! ( inusecheck & 0x01 ) );
446 return ( inuse || ( check != send_check ) );
447 }
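
/* The InUseCheck value read above is a two-bit field: bit 1 is the
 * hardware's "in use" flag and bit 0 is its "check" bit.  A send
 * buffer is treated as free only once "in use" is clear and the check
 * bit matches the value recorded when the buffer was allocated.
 */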
448
449 /**
450 * Calculate starting offset for send buffer
451 *
452 * @v linda Linda device
453 * @v send_buf Send buffer
454 * @ret offset Starting offset
455 */
456 static unsigned long linda_send_buffer_offset ( struct linda *linda,
457 unsigned int send_buf ) {
458 return ( linda->send_buffer_base +
459 ( ( send_buf & ~LINDA_SEND_BUF_TOGGLE ) *
460 LINDA_SEND_BUF_SIZE ) );
461 }
462
463 /**
464 * Create send work queue
465 *
466 * @v linda Linda device
467 * @v qp Queue pair
468 */
469 static int linda_create_send_wq ( struct linda *linda,
470 struct ib_queue_pair *qp ) {
471 struct ib_work_queue *wq = &qp->send;
472 struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
473 int rc;
474
475 /* Reserve send buffers */
476 if ( ( linda->reserved_send_bufs + qp->send.num_wqes ) >
477 LINDA_MAX_SEND_BUFS ) {
478 DBGC ( linda, "Linda %p out of send buffers (have %d, used "
479 "%d, need %d)\n", linda, LINDA_MAX_SEND_BUFS,
480 linda->reserved_send_bufs, qp->send.num_wqes );
481 rc = -ENOBUFS;
482 goto err_reserve_bufs;
483 }
484 linda->reserved_send_bufs += qp->send.num_wqes;
485
486 /* Reset work queue */
487 linda_wq->prod = 0;
488 linda_wq->cons = 0;
489
490 /* Allocate space for send buffer usage list */
491 linda_wq->send_buf = zalloc ( qp->send.num_wqes *
492 sizeof ( linda_wq->send_buf[0] ) );
493 if ( ! linda_wq->send_buf ) {
494 rc = -ENOBUFS;
495 goto err_alloc_send_buf;
496 }
497
498 return 0;
499
500 free ( linda_wq->send_buf );
501 err_alloc_send_buf:
502 linda->reserved_send_bufs -= qp->send.num_wqes;
503 err_reserve_bufs:
504 return rc;
505 }
506
507 /**
508 * Destroy send work queue
509 *
510 * @v linda Linda device
511 * @v qp Queue pair
512 */
513 static void linda_destroy_send_wq ( struct linda *linda,
514 struct ib_queue_pair *qp ) {
515 struct ib_work_queue *wq = &qp->send;
516 struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
517
518 free ( linda_wq->send_buf );
519 linda->reserved_send_bufs -= qp->send.num_wqes;
520 }
521
522 /**
523 * Initialise send datapath
524 *
525 * @v linda Linda device
526 * @ret rc Return status code
527 */
528 static int linda_init_send ( struct linda *linda ) {
529 struct QIB_7220_SendBufBase sendbufbase;
530 struct QIB_7220_SendBufAvailAddr sendbufavailaddr;
531 struct QIB_7220_SendCtrl sendctrl;
532 unsigned int i;
533 int rc;
534
535 /* Retrieve SendBufBase */
536 linda_readq ( linda, &sendbufbase, QIB_7220_SendBufBase_offset );
537 linda->send_buffer_base = BIT_GET ( &sendbufbase,
538 BaseAddr_SmallPIO );
539 DBGC ( linda, "Linda %p send buffers at %lx\n",
540 linda, linda->send_buffer_base );
541
542 /* Initialise the send_buf[] array */
543 for ( i = 0 ; i < LINDA_MAX_SEND_BUFS ; i++ )
544 linda->send_buf[i] = i;
545
546 /* Allocate space for the SendBufAvail array */
547 linda->sendbufavail = malloc_dma ( sizeof ( *linda->sendbufavail ),
548 LINDA_SENDBUFAVAIL_ALIGN );
549 if ( ! linda->sendbufavail ) {
550 rc = -ENOMEM;
551 goto err_alloc_sendbufavail;
552 }
553 memset ( linda->sendbufavail, 0, sizeof ( *linda->sendbufavail ) );
554
555 /* Program SendBufAvailAddr into the hardware */
556 memset ( &sendbufavailaddr, 0, sizeof ( sendbufavailaddr ) );
557 BIT_FILL_1 ( &sendbufavailaddr, SendBufAvailAddr,
558 ( virt_to_bus ( linda->sendbufavail ) >> 6 ) );
559 linda_writeq ( linda, &sendbufavailaddr,
560 QIB_7220_SendBufAvailAddr_offset );
561
562 /* Enable sending and DMA of SendBufAvail */
563 memset ( &sendctrl, 0, sizeof ( sendctrl ) );
564 BIT_FILL_2 ( &sendctrl,
565 SendBufAvailUpd, 1,
566 SPioEnable, 1 );
567 linda_writeq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );
568
569 return 0;
570
571 free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
572 err_alloc_sendbufavail:
573 return rc;
574 }
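
/* Note: SendBufAvailAddr is programmed in 64-byte units (hence the
 * right-shift by 6 above), so the SendBufAvail array is assumed to
 * need at least 64-byte alignment, provided here via
 * LINDA_SENDBUFAVAIL_ALIGN.
 */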
575
576 /**
577 * Shut down send datapath
578 *
579 * @v linda Linda device
580 */
581 static void linda_fini_send ( struct linda *linda ) {
582 struct QIB_7220_SendCtrl sendctrl;
583
584 /* Disable sending and DMA of SendBufAvail */
585 memset ( &sendctrl, 0, sizeof ( sendctrl ) );
586 linda_writeq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );
587 mb();
588
589 /* Ensure hardware has seen this disable */
590 linda_readq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );
591
592 free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
593 }
594
595 /***************************************************************************
596 *
597 * Receive datapath
598 *
599 ***************************************************************************
600 */
601
602 /**
603 * Create receive work queue
604 *
605 * @v linda Linda device
606 * @v qp Queue pair
607 * @ret rc Return status code
608 */
609 static int linda_create_recv_wq ( struct linda *linda,
610 struct ib_queue_pair *qp ) {
611 struct ib_work_queue *wq = &qp->recv;
612 struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
613 struct QIB_7220_RcvHdrAddr0 rcvhdraddr;
614 struct QIB_7220_RcvHdrTailAddr0 rcvhdrtailaddr;
615 struct QIB_7220_RcvHdrHead0 rcvhdrhead;
616 struct QIB_7220_scalar rcvegrindexhead;
617 struct QIB_7220_RcvCtrl rcvctrl;
618 unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
619 int rc;
620
621 /* Reset context information */
622 memset ( &linda_wq->header_prod, 0,
623 sizeof ( linda_wq->header_prod ) );
624 linda_wq->header_cons = 0;
625 linda_wq->eager_prod = 0;
626 linda_wq->eager_cons = 0;
627
628 /* Allocate receive header buffer */
629 linda_wq->header = malloc_dma ( LINDA_RECV_HEADERS_SIZE,
630 LINDA_RECV_HEADERS_ALIGN );
631 if ( ! linda_wq->header ) {
632 rc = -ENOMEM;
633 goto err_alloc_header;
634 }
635
636 /* Enable context in hardware */
637 memset ( &rcvhdraddr, 0, sizeof ( rcvhdraddr ) );
638 BIT_FILL_1 ( &rcvhdraddr, RcvHdrAddr0,
639 ( virt_to_bus ( linda_wq->header ) >> 2 ) );
640 linda_writeq_array8b ( linda, &rcvhdraddr,
641 QIB_7220_RcvHdrAddr0_offset, ctx );
642 memset ( &rcvhdrtailaddr, 0, sizeof ( rcvhdrtailaddr ) );
643 BIT_FILL_1 ( &rcvhdrtailaddr, RcvHdrTailAddr0,
644 ( virt_to_bus ( &linda_wq->header_prod ) >> 2 ) );
645 linda_writeq_array8b ( linda, &rcvhdrtailaddr,
646 QIB_7220_RcvHdrTailAddr0_offset, ctx );
647 memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
648 BIT_FILL_1 ( &rcvhdrhead, counter, 1 );
649 linda_writeq_array64k ( linda, &rcvhdrhead,
650 QIB_7220_RcvHdrHead0_offset, ctx );
651 memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
652 BIT_FILL_1 ( &rcvegrindexhead, Value, 1 );
653 linda_writeq_array64k ( linda, &rcvegrindexhead,
654 QIB_7220_RcvEgrIndexHead0_offset, ctx );
655 linda_readq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
656 BIT_SET ( &rcvctrl, PortEnable[ctx], 1 );
657 BIT_SET ( &rcvctrl, IntrAvail[ctx], 1 );
658 linda_writeq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
659
660 DBGC ( linda, "Linda %p QPN %ld CTX %d hdrs [%lx,%lx) prod %lx\n",
661 linda, qp->qpn, ctx, virt_to_bus ( linda_wq->header ),
662 ( virt_to_bus ( linda_wq->header ) + LINDA_RECV_HEADERS_SIZE ),
663 virt_to_bus ( &linda_wq->header_prod ) );
664 return 0;
665
666 free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
667 err_alloc_header:
668 return rc;
669 }
670
671 /**
672 * Destroy receive work queue
673 *
674 * @v linda Linda device
675 * @v qp Queue pair
676 */
677 static void linda_destroy_recv_wq ( struct linda *linda,
678 struct ib_queue_pair *qp ) {
679 struct ib_work_queue *wq = &qp->recv;
680 struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
681 struct QIB_7220_RcvCtrl rcvctrl;
682 unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
683
684 /* Disable context in hardware */
685 linda_readq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
686 BIT_SET ( &rcvctrl, PortEnable[ctx], 0 );
687 BIT_SET ( &rcvctrl, IntrAvail[ctx], 0 );
688 linda_writeq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
689
690 /* Make sure the hardware has seen that the context is disabled */
691 linda_readq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
692 mb();
693
694 /* Free headers ring */
695 free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
696
697 /* Free context */
698 linda_free_ctx ( linda, ctx );
699 }
700
701 /**
702 * Initialise receive datapath
703 *
704 * @v linda Linda device
705 * @ret rc Return status code
706 */
707 static int linda_init_recv ( struct linda *linda ) {
708 struct QIB_7220_RcvCtrl rcvctrl;
709 struct QIB_7220_scalar rcvegrbase;
710 struct QIB_7220_scalar rcvhdrentsize;
711 struct QIB_7220_scalar rcvhdrcnt;
712 struct QIB_7220_RcvBTHQP rcvbthqp;
713 unsigned int portcfg;
714 unsigned long egrbase;
715 unsigned int eager_array_size_0;
716 unsigned int eager_array_size_other;
717 unsigned int ctx;
718
719 /* Select configuration based on number of contexts */
720 switch ( LINDA_NUM_CONTEXTS ) {
721 case 5:
722 portcfg = LINDA_PORTCFG_5CTX;
723 eager_array_size_0 = LINDA_EAGER_ARRAY_SIZE_5CTX_0;
724 eager_array_size_other = LINDA_EAGER_ARRAY_SIZE_5CTX_OTHER;
725 break;
726 case 9:
727 portcfg = LINDA_PORTCFG_9CTX;
728 eager_array_size_0 = LINDA_EAGER_ARRAY_SIZE_9CTX_0;
729 eager_array_size_other = LINDA_EAGER_ARRAY_SIZE_9CTX_OTHER;
730 break;
731 case 17:
732 portcfg = LINDA_PORTCFG_17CTX;
733 eager_array_size_0 = LINDA_EAGER_ARRAY_SIZE_17CTX_0;
734 eager_array_size_other = LINDA_EAGER_ARRAY_SIZE_17CTX_OTHER;
735 break;
736 default:
737 linker_assert ( 0, invalid_LINDA_NUM_CONTEXTS );
738 return -EINVAL;
739 }
740
741 /* Configure number of contexts */
742 memset ( &rcvctrl, 0, sizeof ( rcvctrl ) );
743 BIT_FILL_3 ( &rcvctrl,
744 TailUpd, 1,
745 PortCfg, portcfg,
746 RcvQPMapEnable, 1 );
747 linda_writeq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
748
749 /* Configure receive header buffer sizes */
750 memset ( &rcvhdrcnt, 0, sizeof ( rcvhdrcnt ) );
751 BIT_FILL_1 ( &rcvhdrcnt, Value, LINDA_RECV_HEADER_COUNT );
752 linda_writeq ( linda, &rcvhdrcnt, QIB_7220_RcvHdrCnt_offset );
753 memset ( &rcvhdrentsize, 0, sizeof ( rcvhdrentsize ) );
754 BIT_FILL_1 ( &rcvhdrentsize, Value, ( LINDA_RECV_HEADER_SIZE >> 2 ) );
755 linda_writeq ( linda, &rcvhdrentsize, QIB_7220_RcvHdrEntSize_offset );
756
757 /* Calculate eager array start addresses for each context */
758 linda_readq ( linda, &rcvegrbase, QIB_7220_RcvEgrBase_offset );
759 egrbase = BIT_GET ( &rcvegrbase, Value );
760 linda->recv_wq[0].eager_array = egrbase;
761 linda->recv_wq[0].eager_entries = eager_array_size_0;
762 egrbase += ( eager_array_size_0 * sizeof ( struct QIB_7220_RcvEgr ) );
763 for ( ctx = 1 ; ctx < LINDA_NUM_CONTEXTS ; ctx++ ) {
764 linda->recv_wq[ctx].eager_array = egrbase;
765 linda->recv_wq[ctx].eager_entries = eager_array_size_other;
766 egrbase += ( eager_array_size_other *
767 sizeof ( struct QIB_7220_RcvEgr ) );
768 }
769 for ( ctx = 0 ; ctx < LINDA_NUM_CONTEXTS ; ctx++ ) {
770 DBGC ( linda, "Linda %p CTX %d eager array at %lx (%d "
771 "entries)\n", linda, ctx,
772 linda->recv_wq[ctx].eager_array,
773 linda->recv_wq[ctx].eager_entries );
774 }
775
776 /* Set the BTH QP for Infinipath packets to an unused value */
777 memset ( &rcvbthqp, 0, sizeof ( rcvbthqp ) );
778 BIT_FILL_1 ( &rcvbthqp, RcvBTHQP, LINDA_QP_IDETH );
779 linda_writeq ( linda, &rcvbthqp, QIB_7220_RcvBTHQP_offset );
780
781 return 0;
782 }
783
784 /**
785 * Shut down receive datapath
786 *
787 * @v linda Linda device
788 */
789 static void linda_fini_recv ( struct linda *linda __unused ) {
790 /* Nothing to do; all contexts were already disabled when the
791 * queue pairs were destroyed
792 */
793 }
794
795 /***************************************************************************
796 *
797 * Completion queue operations
798 *
799 ***************************************************************************
800 */
801
802 /**
803 * Create completion queue
804 *
805 * @v ibdev Infiniband device
806 * @v cq Completion queue
807 * @ret rc Return status code
808 */
809 static int linda_create_cq ( struct ib_device *ibdev,
810 struct ib_completion_queue *cq ) {
811 struct linda *linda = ib_get_drvdata ( ibdev );
812 static int cqn;
813
814 /* The hardware has no concept of completion queues. We
815 * simply use the association between CQs and WQs (already
816 * handled by the IB core) to decide which WQs to poll.
817 *
818 * We do set a CQN, just to avoid confusing debug messages
819 * from the IB core.
820 */
821 cq->cqn = ++cqn;
822 DBGC ( linda, "Linda %p CQN %ld created\n", linda, cq->cqn );
823
824 return 0;
825 }
826
827 /**
828 * Destroy completion queue
829 *
830 * @v ibdev Infiniband device
831 * @v cq Completion queue
832 */
833 static void linda_destroy_cq ( struct ib_device *ibdev,
834 struct ib_completion_queue *cq ) {
835 struct linda *linda = ib_get_drvdata ( ibdev );
836
837 /* Nothing to do */
838 DBGC ( linda, "Linda %p CQN %ld destroyed\n", linda, cq->cqn );
839 }
840
841 /***************************************************************************
842 *
843 * Queue pair operations
844 *
845 ***************************************************************************
846 */
847
848 /**
849 * Create queue pair
850 *
851 * @v ibdev Infiniband device
852 * @v qp Queue pair
853 * @ret rc Return status code
854 */
855 static int linda_create_qp ( struct ib_device *ibdev,
856 struct ib_queue_pair *qp ) {
857 struct linda *linda = ib_get_drvdata ( ibdev );
858 int ctx;
859 int rc;
860
861 /* Locate an available context */
862 ctx = linda_alloc_ctx ( linda );
863 if ( ctx < 0 ) {
864 rc = ctx;
865 goto err_alloc_ctx;
866 }
867
868 /* Set queue pair number based on context index */
869 qp->qpn = linda_ctx_to_qpn ( ctx );
870
871 /* Set work-queue private data pointers */
872 ib_wq_set_drvdata ( &qp->send, &linda->send_wq[ctx] );
873 ib_wq_set_drvdata ( &qp->recv, &linda->recv_wq[ctx] );
874
875 /* Create receive work queue */
876 if ( ( rc = linda_create_recv_wq ( linda, qp ) ) != 0 )
877 goto err_create_recv_wq;
878
879 /* Create send work queue */
880 if ( ( rc = linda_create_send_wq ( linda, qp ) ) != 0 )
881 goto err_create_send_wq;
882
883 return 0;
884
885 linda_destroy_send_wq ( linda, qp );
886 err_create_send_wq:
887 linda_destroy_recv_wq ( linda, qp );
888 err_create_recv_wq:
889 linda_free_ctx ( linda, ctx );
890 err_alloc_ctx:
891 return rc;
892 }
893
894 /**
895 * Modify queue pair
896 *
897 * @v ibdev Infiniband device
898 * @v qp Queue pair
899 * @ret rc Return status code
900 */
901 static int linda_modify_qp ( struct ib_device *ibdev,
902 struct ib_queue_pair *qp ) {
903 struct linda *linda = ib_get_drvdata ( ibdev );
904
905 /* Nothing to do; the hardware doesn't have a notion of queue
906 * keys
907 */
908 DBGC ( linda, "Linda %p QPN %ld modified\n", linda, qp->qpn );
909 return 0;
910 }
911
912 /**
913 * Destroy queue pair
914 *
915 * @v ibdev Infiniband device
916 * @v qp Queue pair
917 */
918 static void linda_destroy_qp ( struct ib_device *ibdev,
919 struct ib_queue_pair *qp ) {
920 struct linda *linda = ib_get_drvdata ( ibdev );
921
922 linda_destroy_send_wq ( linda, qp );
923 linda_destroy_recv_wq ( linda, qp );
924 }
925
926 /***************************************************************************
927 *
928 * Work request operations
929 *
930 ***************************************************************************
931 */
932
933 /**
934 * Post send work queue entry
935 *
936 * @v ibdev Infiniband device
937 * @v qp Queue pair
938 * @v av Address vector
939 * @v iobuf I/O buffer
940 * @ret rc Return status code
941 */
942 static int linda_post_send ( struct ib_device *ibdev,
943 struct ib_queue_pair *qp,
944 struct ib_address_vector *av,
945 struct io_buffer *iobuf ) {
946 struct linda *linda = ib_get_drvdata ( ibdev );
947 struct ib_work_queue *wq = &qp->send;
948 struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
949 struct QIB_7220_SendPbc sendpbc;
950 uint8_t header_buf[IB_MAX_HEADER_SIZE];
951 struct io_buffer headers;
952 unsigned int send_buf;
953 unsigned long start_offset;
954 unsigned long offset;
955 size_t len;
956 ssize_t frag_len;
957 uint32_t *data;
958
959 /* Allocate send buffer and calculate offset */
960 send_buf = linda_alloc_send_buf ( linda );
961 start_offset = offset = linda_send_buffer_offset ( linda, send_buf );
962
963 /* Store I/O buffer and send buffer index */
964 assert ( wq->iobufs[linda_wq->prod] == NULL );
965 wq->iobufs[linda_wq->prod] = iobuf;
966 linda_wq->send_buf[linda_wq->prod] = send_buf;
967
968 /* Construct headers */
969 iob_populate ( &headers, header_buf, 0, sizeof ( header_buf ) );
970 iob_reserve ( &headers, sizeof ( header_buf ) );
971 ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), av );
972
973 /* Calculate packet length */
974 len = ( ( sizeof ( sendpbc ) + iob_len ( &headers ) +
975 iob_len ( iobuf ) + 3 ) & ~3 );
976
977 /* Construct send per-buffer control word */
978 memset ( &sendpbc, 0, sizeof ( sendpbc ) );
979 BIT_FILL_2 ( &sendpbc,
980 LengthP1_toibc, ( ( len >> 2 ) - 1 ),
981 VL15, 1 );
982
983 /* Write SendPbc */
984 DBG_DISABLE ( DBGLVL_IO );
985 linda_writeq ( linda, &sendpbc, offset );
986 offset += sizeof ( sendpbc );
987
988 /* Write headers */
989 for ( data = headers.data, frag_len = iob_len ( &headers ) ;
990 frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
991 linda_writel ( linda, *data, offset );
992 }
993
994 /* Write data */
995 for ( data = iobuf->data, frag_len = iob_len ( iobuf ) ;
996 frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
997 linda_writel ( linda, *data, offset );
998 }
999 DBG_ENABLE ( DBGLVL_IO );
1000
1001 assert ( ( start_offset + len ) == offset );
1002 DBGC2 ( linda, "Linda %p QPN %ld TX %d(%d) posted [%lx,%lx)\n",
1003 linda, qp->qpn, send_buf, linda_wq->prod,
1004 start_offset, offset );
1005
1006 /* Increment producer counter */
1007 linda_wq->prod = ( ( linda_wq->prod + 1 ) & ( wq->num_wqes - 1 ) );
1008
1009 return 0;
1010 }
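
/* Illustrative layout of a posted send buffer: one SendPbc qword
 * describing the packet length, followed by the IB headers, followed
 * by the payload, with the overall length rounded up to a multiple of
 * four bytes to match the dword-at-a-time copies above.
 */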
1011
1012 /**
1013 * Complete send work queue entry
1014 *
1015 * @v ibdev Infiniband device
1016 * @v qp Queue pair
1017 * @v wqe_idx Work queue entry index
1018 */
1019 static void linda_complete_send ( struct ib_device *ibdev,
1020 struct ib_queue_pair *qp,
1021 unsigned int wqe_idx ) {
1022 struct linda *linda = ib_get_drvdata ( ibdev );
1023 struct ib_work_queue *wq = &qp->send;
1024 struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
1025 struct io_buffer *iobuf;
1026 unsigned int send_buf;
1027
1028 /* Parse completion */
1029 send_buf = linda_wq->send_buf[wqe_idx];
1030 DBGC2 ( linda, "Linda %p QPN %ld TX %d(%d) complete\n",
1031 linda, qp->qpn, send_buf, wqe_idx );
1032
1033 /* Complete work queue entry */
1034 iobuf = wq->iobufs[wqe_idx];
1035 assert ( iobuf != NULL );
1036 ib_complete_send ( ibdev, qp, iobuf, 0 );
1037 wq->iobufs[wqe_idx] = NULL;
1038
1039 /* Free send buffer */
1040 linda_free_send_buf ( linda, send_buf );
1041 }
1042
1043 /**
1044 * Poll send work queue
1045 *
1046 * @v ibdev Infiniband device
1047 * @v qp Queue pair
1048 */
1049 static void linda_poll_send_wq ( struct ib_device *ibdev,
1050 struct ib_queue_pair *qp ) {
1051 struct linda *linda = ib_get_drvdata ( ibdev );
1052 struct ib_work_queue *wq = &qp->send;
1053 struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
1054 unsigned int send_buf;
1055
1056 /* Look for completions */
1057 while ( wq->fill ) {
1058
1059 /* Check to see if send buffer has completed */
1060 send_buf = linda_wq->send_buf[linda_wq->cons];
1061 if ( linda_send_buf_in_use ( linda, send_buf ) )
1062 break;
1063
1064 /* Complete this buffer */
1065 linda_complete_send ( ibdev, qp, linda_wq->cons );
1066
1067 /* Increment consumer counter */
1068 linda_wq->cons = ( ( linda_wq->cons + 1 ) &
1069 ( wq->num_wqes - 1 ) );
1070 }
1071 }
1072
1073 /**
1074 * Post receive work queue entry
1075 *
1076 * @v ibdev Infiniband device
1077 * @v qp Queue pair
1078 * @v iobuf I/O buffer
1079 * @ret rc Return status code
1080 */
1081 static int linda_post_recv ( struct ib_device *ibdev,
1082 struct ib_queue_pair *qp,
1083 struct io_buffer *iobuf ) {
1084 struct linda *linda = ib_get_drvdata ( ibdev );
1085 struct ib_work_queue *wq = &qp->recv;
1086 struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
1087 struct QIB_7220_RcvEgr rcvegr;
1088 struct QIB_7220_scalar rcvegrindexhead;
1089 unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
1090 physaddr_t addr;
1091 size_t len;
1092 unsigned int wqe_idx;
1093 unsigned int bufsize;
1094
1095 /* Sanity checks */
1096 addr = virt_to_bus ( iobuf->data );
1097 len = iob_tailroom ( iobuf );
1098 if ( addr & ( LINDA_EAGER_BUFFER_ALIGN - 1 ) ) {
1099 DBGC ( linda, "Linda %p QPN %ld misaligned RX buffer "
1100 "(%08lx)\n", linda, qp->qpn, addr );
1101 return -EINVAL;
1102 }
1103 if ( len != LINDA_RECV_PAYLOAD_SIZE ) {
1104 DBGC ( linda, "Linda %p QPN %ld wrong RX buffer size (%zd)\n",
1105 linda, qp->qpn, len );
1106 return -EINVAL;
1107 }
1108
1109 /* Calculate eager producer index and WQE index */
1110 wqe_idx = ( linda_wq->eager_prod & ( wq->num_wqes - 1 ) );
1111 assert ( wq->iobufs[wqe_idx] == NULL );
1112
1113 /* Store I/O buffer */
1114 wq->iobufs[wqe_idx] = iobuf;
1115
1116 /* Calculate buffer size */
1117 switch ( LINDA_RECV_PAYLOAD_SIZE ) {
1118 case 2048: bufsize = LINDA_EAGER_BUFFER_2K; break;
1119 case 4096: bufsize = LINDA_EAGER_BUFFER_4K; break;
1120 case 8192: bufsize = LINDA_EAGER_BUFFER_8K; break;
1121 case 16384: bufsize = LINDA_EAGER_BUFFER_16K; break;
1122 case 32768: bufsize = LINDA_EAGER_BUFFER_32K; break;
1123 case 65536: bufsize = LINDA_EAGER_BUFFER_64K; break;
1124 default: linker_assert ( 0, invalid_rx_payload_size );
1125 bufsize = LINDA_EAGER_BUFFER_NONE;
1126 }
1127
1128 /* Post eager buffer */
1129 memset ( &rcvegr, 0, sizeof ( rcvegr ) );
1130 BIT_FILL_2 ( &rcvegr,
1131 Addr, ( addr >> 11 ),
1132 BufSize, bufsize );
1133 linda_writeq_array8b ( linda, &rcvegr,
1134 linda_wq->eager_array, linda_wq->eager_prod );
1135 DBGC2 ( linda, "Linda %p QPN %ld RX egr %d(%d) posted [%lx,%lx)\n",
1136 linda, qp->qpn, linda_wq->eager_prod, wqe_idx,
1137 addr, ( addr + len ) );
1138
1139 /* Increment producer index */
1140 linda_wq->eager_prod = ( ( linda_wq->eager_prod + 1 ) &
1141 ( linda_wq->eager_entries - 1 ) );
1142
1143 /* Update head index */
1144 memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
1145 BIT_FILL_1 ( &rcvegrindexhead,
1146 Value, ( ( linda_wq->eager_prod + 1 ) &
1147 ( linda_wq->eager_entries - 1 ) ) );
1148 linda_writeq_array64k ( linda, &rcvegrindexhead,
1149 QIB_7220_RcvEgrIndexHead0_offset, ctx );
1150
1151 return 0;
1152 }
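
/* Note: the eager buffer Addr field written above is in 2048-byte
 * units (hence the right-shift by 11), which is presumably why receive
 * buffers must be aligned to LINDA_EAGER_BUFFER_ALIGN.
 */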
1153
1154 /**
1155 * Complete receive work queue entry
1156 *
1157 * @v ibdev Infiniband device
1158 * @v qp Queue pair
1159 * @v header_offs Header offset
1160 */
1161 static void linda_complete_recv ( struct ib_device *ibdev,
1162 struct ib_queue_pair *qp,
1163 unsigned int header_offs ) {
1164 struct linda *linda = ib_get_drvdata ( ibdev );
1165 struct ib_work_queue *wq = &qp->recv;
1166 struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
1167 struct QIB_7220_RcvHdrFlags *rcvhdrflags;
1168 struct QIB_7220_RcvEgr rcvegr;
1169 struct io_buffer headers;
1170 struct io_buffer *iobuf;
1171 struct ib_queue_pair *intended_qp;
1172 struct ib_address_vector av;
1173 unsigned int rcvtype;
1174 unsigned int pktlen;
1175 unsigned int egrindex;
1176 unsigned int useegrbfr;
1177 unsigned int iberr, mkerr, tiderr, khdrerr, mtuerr;
1178 unsigned int lenerr, parityerr, vcrcerr, icrcerr;
1179 unsigned int err;
1180 unsigned int hdrqoffset;
1181 unsigned int header_len;
1182 unsigned int padded_payload_len;
1183 unsigned int wqe_idx;
1184 size_t payload_len;
1185 int qp0;
1186 int rc;
1187
1188 /* RcvHdrFlags are at the end of the header entry */
1189 rcvhdrflags = ( linda_wq->header + header_offs +
1190 LINDA_RECV_HEADER_SIZE - sizeof ( *rcvhdrflags ) );
1191 rcvtype = BIT_GET ( rcvhdrflags, RcvType );
1192 pktlen = ( BIT_GET ( rcvhdrflags, PktLen ) << 2 );
1193 egrindex = BIT_GET ( rcvhdrflags, EgrIndex );
1194 useegrbfr = BIT_GET ( rcvhdrflags, UseEgrBfr );
1195 hdrqoffset = ( BIT_GET ( rcvhdrflags, HdrqOffset ) << 2 );
1196 iberr = BIT_GET ( rcvhdrflags, IBErr );
1197 mkerr = BIT_GET ( rcvhdrflags, MKErr );
1198 tiderr = BIT_GET ( rcvhdrflags, TIDErr );
1199 khdrerr = BIT_GET ( rcvhdrflags, KHdrErr );
1200 mtuerr = BIT_GET ( rcvhdrflags, MTUErr );
1201 lenerr = BIT_GET ( rcvhdrflags, LenErr );
1202 parityerr = BIT_GET ( rcvhdrflags, ParityErr );
1203 vcrcerr = BIT_GET ( rcvhdrflags, VCRCErr );
1204 icrcerr = BIT_GET ( rcvhdrflags, ICRCErr );
1205 header_len = ( LINDA_RECV_HEADER_SIZE - hdrqoffset -
1206 sizeof ( *rcvhdrflags ) );
1207 padded_payload_len = ( pktlen - header_len - 4 /* ICRC */ );
1208 err = ( iberr | mkerr | tiderr | khdrerr | mtuerr |
1209 lenerr | parityerr | vcrcerr | icrcerr );
1210 /* IB header is placed immediately before RcvHdrFlags */
1211 iob_populate ( &headers, ( ( ( void * ) rcvhdrflags ) - header_len ),
1212 header_len, header_len );
1213
1214 /* Dump diagnostic information */
1215 if ( err || ( ! useegrbfr ) ) {
1216 DBGC ( linda, "Linda %p QPN %ld RX egr %d%s hdr %d type %d "
1217 "len %d(%d+%d+4)%s%s%s%s%s%s%s%s%s%s%s\n", linda,
1218 qp->qpn, egrindex, ( useegrbfr ? "" : "(unused)" ),
1219 ( header_offs / LINDA_RECV_HEADER_SIZE ), rcvtype,
1220 pktlen, header_len, padded_payload_len,
1221 ( err ? " [Err" : "" ), ( iberr ? " IB" : "" ),
1222 ( mkerr ? " MK" : "" ), ( tiderr ? " TID" : "" ),
1223 ( khdrerr ? " KHdr" : "" ), ( mtuerr ? " MTU" : "" ),
1224 ( lenerr ? " Len" : "" ), ( parityerr ? " Parity" : ""),
1225 ( vcrcerr ? " VCRC" : "" ), ( icrcerr ? " ICRC" : "" ),
1226 ( err ? "]" : "" ) );
1227 } else {
1228 DBGC2 ( linda, "Linda %p QPN %ld RX egr %d hdr %d type %d "
1229 "len %d(%d+%d+4)\n", linda, qp->qpn, egrindex,
1230 ( header_offs / LINDA_RECV_HEADER_SIZE ), rcvtype,
1231 pktlen, header_len, padded_payload_len );
1232 }
1233 DBGCP_HDA ( linda, hdrqoffset, headers.data,
1234 ( header_len + sizeof ( *rcvhdrflags ) ) );
1235
1236 /* Parse header to generate address vector */
1237 qp0 = ( qp->qpn == 0 );
1238 intended_qp = NULL;
1239 if ( ( rc = ib_pull ( ibdev, &headers, ( qp0 ? &intended_qp : NULL ),
1240 &payload_len, &av ) ) != 0 ) {
1241 DBGC ( linda, "Linda %p could not parse headers: %s\n",
1242 linda, strerror ( rc ) );
1243 err = 1;
1244 }
1245 if ( ! intended_qp )
1246 intended_qp = qp;
1247
1248 /* Complete this buffer and any skipped buffers. Note that
1249 * when the hardware runs out of buffers, it will repeatedly
1250 * report the same buffer (the tail) as a TID error, and that
1251 * it also has a habit of sometimes skipping over several
1252 * buffers at once.
1253 */
1254 while ( 1 ) {
1255
1256 /* If we have caught up to the producer counter, stop.
1257 * This will happen when the hardware first runs out
1258 * of buffers and starts reporting TID errors against
1259 * the eager buffer it wants to use next.
1260 */
1261 if ( linda_wq->eager_cons == linda_wq->eager_prod )
1262 break;
1263
1264 /* If we have caught up to where we should be after
1265 * completing this egrindex, stop. We phrase the test
1266 * this way to avoid completing the entire ring when
1267 * we receive the same egrindex twice in a row.
1268 */
1269 if ( ( linda_wq->eager_cons ==
1270 ( ( egrindex + 1 ) & ( linda_wq->eager_entries - 1 ) )))
1271 break;
1272
1273 /* Identify work queue entry and corresponding I/O
1274 * buffer.
1275 */
1276 wqe_idx = ( linda_wq->eager_cons & ( wq->num_wqes - 1 ) );
1277 iobuf = wq->iobufs[wqe_idx];
1278 assert ( iobuf != NULL );
1279 wq->iobufs[wqe_idx] = NULL;
1280
1281 /* Complete the eager buffer */
1282 if ( linda_wq->eager_cons == egrindex ) {
1283 /* Completing the eager buffer described in
1284 * this header entry.
1285 */
1286 iob_put ( iobuf, payload_len );
1287 rc = ( err ? -EIO : ( useegrbfr ? 0 : -ECANCELED ) );
1288 /* Redirect to target QP if necessary */
1289 if ( qp != intended_qp ) {
1290 DBGC ( linda, "Linda %p redirecting QPN %ld "
1291 "=> %ld\n",
1292 linda, qp->qpn, intended_qp->qpn );
1293 /* Compensate for incorrect fill levels */
1294 qp->recv.fill--;
1295 intended_qp->recv.fill++;
1296 }
1297 ib_complete_recv ( ibdev, intended_qp, &av, iobuf, rc);
1298 } else {
1299 /* Completing on a skipped-over eager buffer */
1300 ib_complete_recv ( ibdev, qp, &av, iobuf, -ECANCELED );
1301 }
1302
1303 /* Clear eager buffer */
1304 memset ( &rcvegr, 0, sizeof ( rcvegr ) );
1305 linda_writeq_array8b ( linda, &rcvegr, linda_wq->eager_array,
1306 linda_wq->eager_cons );
1307
1308 /* Increment consumer index */
1309 linda_wq->eager_cons = ( ( linda_wq->eager_cons + 1 ) &
1310 ( linda_wq->eager_entries - 1 ) );
1311 }
1312 }
1313
1314 /**
1315 * Poll receive work queue
1316 *
1317 * @v ibdev Infiniband device
1318 * @v qp Queue pair
1319 */
1320 static void linda_poll_recv_wq ( struct ib_device *ibdev,
1321 struct ib_queue_pair *qp ) {
1322 struct linda *linda = ib_get_drvdata ( ibdev );
1323 struct ib_work_queue *wq = &qp->recv;
1324 struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
1325 struct QIB_7220_RcvHdrHead0 rcvhdrhead;
1326 unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
1327 unsigned int header_prod;
1328
1329 /* Check for received packets */
1330 header_prod = ( BIT_GET ( &linda_wq->header_prod, Value ) << 2 );
1331 if ( header_prod == linda_wq->header_cons )
1332 return;
1333
1334 /* Process all received packets */
1335 while ( linda_wq->header_cons != header_prod ) {
1336
1337 /* Complete the receive */
1338 linda_complete_recv ( ibdev, qp, linda_wq->header_cons );
1339
1340 /* Increment the consumer offset */
1341 linda_wq->header_cons += LINDA_RECV_HEADER_SIZE;
1342 linda_wq->header_cons %= LINDA_RECV_HEADERS_SIZE;
1343 }
1344
1345 /* Update consumer offset */
1346 memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
1347 BIT_FILL_2 ( &rcvhdrhead,
1348 RcvHeadPointer, ( linda_wq->header_cons >> 2 ),
1349 counter, 1 );
1350 linda_writeq_array64k ( linda, &rcvhdrhead,
1351 QIB_7220_RcvHdrHead0_offset, ctx );
1352 }
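
/* Note: the hardware-written header producer value is in dwords (hence
 * the left-shift by 2 when reading it), while header_cons is kept in
 * bytes and wraps at LINDA_RECV_HEADERS_SIZE; RcvHeadPointer is then
 * written back in dwords again.
 */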
1353
1354 /**
1355 * Poll completion queue
1356 *
1357 * @v ibdev Infiniband device
1358 * @v cq Completion queue
1359 */
1360 static void linda_poll_cq ( struct ib_device *ibdev,
1361 struct ib_completion_queue *cq ) {
1362 struct ib_work_queue *wq;
1363
1364 /* Poll associated send and receive queues */
1365 list_for_each_entry ( wq, &cq->work_queues, list ) {
1366 if ( wq->is_send ) {
1367 linda_poll_send_wq ( ibdev, wq->qp );
1368 } else {
1369 linda_poll_recv_wq ( ibdev, wq->qp );
1370 }
1371 }
1372 }
1373
1374 /***************************************************************************
1375 *
1376 * Event queues
1377 *
1378 ***************************************************************************
1379 */
1380
1381 /**
1382 * Poll event queue
1383 *
1384 * @v ibdev Infiniband device
1385 */
1386 static void linda_poll_eq ( struct ib_device *ibdev ) {
1387 struct linda *linda = ib_get_drvdata ( ibdev );
1388 struct QIB_7220_ErrStatus errstatus;
1389 struct QIB_7220_ErrClear errclear;
1390
1391 /* Check for link status changes */
1392 DBG_DISABLE ( DBGLVL_IO );
1393 linda_readq ( linda, &errstatus, QIB_7220_ErrStatus_offset );
1394 DBG_ENABLE ( DBGLVL_IO );
1395 if ( BIT_GET ( &errstatus, IBStatusChanged ) ) {
1396 linda_link_state_changed ( ibdev );
1397 memset ( &errclear, 0, sizeof ( errclear ) );
1398 BIT_FILL_1 ( &errclear, IBStatusChangedClear, 1 );
1399 linda_writeq ( linda, &errclear, QIB_7220_ErrClear_offset );
1400 }
1401 }
1402
1403 /***************************************************************************
1404 *
1405 * Infiniband link-layer operations
1406 *
1407 ***************************************************************************
1408 */
1409
1410 /**
1411 * Initialise Infiniband link
1412 *
1413 * @v ibdev Infiniband device
1414 * @ret rc Return status code
1415 */
1416 static int linda_open ( struct ib_device *ibdev ) {
1417 struct linda *linda = ib_get_drvdata ( ibdev );
1418 struct QIB_7220_Control control;
1419
1420 /* Enable link */
1421 linda_readq ( linda, &control, QIB_7220_Control_offset );
1422 BIT_SET ( &control, LinkEn, 1 );
1423 linda_writeq ( linda, &control, QIB_7220_Control_offset );
1424 return 0;
1425 }
1426
1427 /**
1428 * Close Infiniband link
1429 *
1430 * @v ibdev Infiniband device
1431 */
1432 static void linda_close ( struct ib_device *ibdev ) {
1433 struct linda *linda = ib_get_drvdata ( ibdev );
1434 struct QIB_7220_Control control;
1435
1436 /* Disable link */
1437 linda_readq ( linda, &control, QIB_7220_Control_offset );
1438 BIT_SET ( &control, LinkEn, 0 );
1439 linda_writeq ( linda, &control, QIB_7220_Control_offset );
1440 }
1441
1442 /***************************************************************************
1443 *
1444 * Multicast group operations
1445 *
1446 ***************************************************************************
1447 */
1448
1449 /**
1450 * Attach to multicast group
1451 *
1452 * @v ibdev Infiniband device
1453 * @v qp Queue pair
1454 * @v gid Multicast GID
1455 * @ret rc Return status code
1456 */
1457 static int linda_mcast_attach ( struct ib_device *ibdev,
1458 struct ib_queue_pair *qp,
1459 struct ib_gid *gid ) {
1460 struct linda *linda = ib_get_drvdata ( ibdev );
1461
1462 ( void ) linda;
1463 ( void ) qp;
1464 ( void ) gid;
1465 return 0;
1466 }
1467
1468 /**
1469 * Detach from multicast group
1470 *
1471 * @v ibdev Infiniband device
1472 * @v qp Queue pair
1473 * @v gid Multicast GID
1474 */
1475 static void linda_mcast_detach ( struct ib_device *ibdev,
1476 struct ib_queue_pair *qp,
1477 struct ib_gid *gid ) {
1478 struct linda *linda = ib_get_drvdata ( ibdev );
1479
1480 ( void ) linda;
1481 ( void ) qp;
1482 ( void ) gid;
1483 }
1484
1485 /** Linda Infiniband operations */
1486 static struct ib_device_operations linda_ib_operations = {
1487 .create_cq = linda_create_cq,
1488 .destroy_cq = linda_destroy_cq,
1489 .create_qp = linda_create_qp,
1490 .modify_qp = linda_modify_qp,
1491 .destroy_qp = linda_destroy_qp,
1492 .post_send = linda_post_send,
1493 .post_recv = linda_post_recv,
1494 .poll_cq = linda_poll_cq,
1495 .poll_eq = linda_poll_eq,
1496 .open = linda_open,
1497 .close = linda_close,
1498 .mcast_attach = linda_mcast_attach,
1499 .mcast_detach = linda_mcast_detach,
1500 .set_port_info = linda_set_port_info,
1501 .set_pkey_table = linda_set_pkey_table,
1502 };
1503
1504 /***************************************************************************
1505 *
1506 * I2C bus operations
1507 *
1508 ***************************************************************************
1509 */
1510
1511 /** Linda I2C bit to GPIO mappings */
1512 static unsigned int linda_i2c_bits[] = {
1513 [I2C_BIT_SCL] = ( 1 << LINDA_GPIO_SCL ),
1514 [I2C_BIT_SDA] = ( 1 << LINDA_GPIO_SDA ),
1515 };
1516
1517 /**
1518 * Read Linda I2C line status
1519 *
1520 * @v basher Bit-bashing interface
1521 * @v bit_id Bit number
1522 * @ret zero Input is a logic 0
1523 * @ret non-zero Input is a logic 1
1524 */
1525 static int linda_i2c_read_bit ( struct bit_basher *basher,
1526 unsigned int bit_id ) {
1527 struct linda *linda =
1528 container_of ( basher, struct linda, i2c.basher );
1529 struct QIB_7220_EXTStatus extstatus;
1530 unsigned int status;
1531
1532 DBG_DISABLE ( DBGLVL_IO );
1533
1534 linda_readq ( linda, &extstatus, QIB_7220_EXTStatus_offset );
1535 status = ( BIT_GET ( &extstatus, GPIOIn ) & linda_i2c_bits[bit_id] );
1536
1537 DBG_ENABLE ( DBGLVL_IO );
1538
1539 return status;
1540 }
1541
1542 /**
1543 * Write Linda I2C line status
1544 *
1545 * @v basher Bit-bashing interface
1546 * @v bit_id Bit number
1547 * @v data Value to write
1548 */
1549 static void linda_i2c_write_bit ( struct bit_basher *basher,
1550 unsigned int bit_id, unsigned long data ) {
1551 struct linda *linda =
1552 container_of ( basher, struct linda, i2c.basher );
1553 struct QIB_7220_EXTCtrl extctrl;
1554 struct QIB_7220_GPIO gpioout;
1555 unsigned int bit = linda_i2c_bits[bit_id];
1556 unsigned int outputs = 0;
1557 unsigned int output_enables = 0;
1558
1559 DBG_DISABLE ( DBGLVL_IO );
1560
1561 /* Read current GPIO mask and outputs */
1562 linda_readq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
1563 linda_readq ( linda, &gpioout, QIB_7220_GPIOOut_offset );
1564
1565 /* Update outputs and output enables. I2C lines are tied
1566 * high, so we always set the output to 0 and use the output
1567 * enable to control the line.
1568 */
1569 output_enables = BIT_GET ( &extctrl, GPIOOe );
1570 output_enables = ( ( output_enables & ~bit ) | ( ~data & bit ) );
1571 outputs = BIT_GET ( &gpioout, GPIO );
1572 outputs = ( outputs & ~bit );
1573 BIT_SET ( &extctrl, GPIOOe, output_enables );
1574 BIT_SET ( &gpioout, GPIO, outputs );
1575
1576 /* Write the output enable first; that way we avoid logic
1577 * hazards.
1578 */
1579 linda_writeq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
1580 linda_writeq ( linda, &gpioout, QIB_7220_GPIOOut_offset );
1581 mb();
1582
1583 DBG_ENABLE ( DBGLVL_IO );
1584 }
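
/* For example, writing a logic 1 clears the relevant GPIO output
 * enable so that the external pull-up takes the line high, while
 * writing a logic 0 enables the (always-zero) output driver to pull
 * the line low, emulating an open-drain I2C driver.
 */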
1585
1586 /** Linda I2C bit-bashing interface operations */
1587 static struct bit_basher_operations linda_i2c_basher_ops = {
1588 .read = linda_i2c_read_bit,
1589 .write = linda_i2c_write_bit,
1590 };
1591
1592 /**
1593 * Initialise Linda I2C subsystem
1594 *
1595 * @v linda Linda device
1596 * @ret rc Return status code
1597 */
1598 static int linda_init_i2c ( struct linda *linda ) {
1599 static int try_eeprom_address[] = { 0x51, 0x50 };
1600 unsigned int i;
1601 int rc;
1602
1603 /* Initialise bus */
1604 if ( ( rc = init_i2c_bit_basher ( &linda->i2c,
1605 &linda_i2c_basher_ops ) ) != 0 ) {
1606 DBGC ( linda, "Linda %p could not initialise I2C bus: %s\n",
1607 linda, strerror ( rc ) );
1608 return rc;
1609 }
1610
1611 /* Probe for devices */
1612 for ( i = 0 ; i < ( sizeof ( try_eeprom_address ) /
1613 sizeof ( try_eeprom_address[0] ) ) ; i++ ) {
1614 init_i2c_eeprom ( &linda->eeprom, try_eeprom_address[i] );
1615 if ( ( rc = i2c_check_presence ( &linda->i2c.i2c,
1616 &linda->eeprom ) ) == 0 ) {
1617 DBGC2 ( linda, "Linda %p found EEPROM at %02x\n",
1618 linda, try_eeprom_address[i] );
1619 return 0;
1620 }
1621 }
1622
1623 DBGC ( linda, "Linda %p could not find EEPROM\n", linda );
1624 return -ENODEV;
1625 }
1626
1627 /**
1628 * Read EEPROM parameters
1629 *
1630 * @v linda Linda device
1631 * @v guid GUID to fill in
1632 * @ret rc Return status code
1633 */
1634 static int linda_read_eeprom ( struct linda *linda,
1635 struct ib_gid_half *guid ) {
1636 struct i2c_interface *i2c = &linda->i2c.i2c;
1637 int rc;
1638
1639 /* Read GUID */
1640 if ( ( rc = i2c->read ( i2c, &linda->eeprom, LINDA_EEPROM_GUID_OFFSET,
1641 guid->u.bytes, sizeof ( *guid ) ) ) != 0 ) {
1642 DBGC ( linda, "Linda %p could not read GUID: %s\n",
1643 linda, strerror ( rc ) );
1644 return rc;
1645 }
1646 DBGC2 ( linda, "Linda %p has GUID %02x:%02x:%02x:%02x:%02x:%02x:"
1647 "%02x:%02x\n", linda, guid->u.bytes[0], guid->u.bytes[1],
1648 guid->u.bytes[2], guid->u.bytes[3], guid->u.bytes[4],
1649 guid->u.bytes[5], guid->u.bytes[6], guid->u.bytes[7] );
1650
1651 /* Read serial number (debug only) */
1652 if ( DBG_LOG ) {
1653 uint8_t serial[LINDA_EEPROM_SERIAL_SIZE + 1];
1654
1655 serial[ sizeof ( serial ) - 1 ] = '\0';
1656 if ( ( rc = i2c->read ( i2c, &linda->eeprom,
1657 LINDA_EEPROM_SERIAL_OFFSET, serial,
1658 ( sizeof ( serial ) - 1 ) ) ) != 0 ) {
1659 DBGC ( linda, "Linda %p could not read serial: %s\n",
1660 linda, strerror ( rc ) );
1661 return rc;
1662 }
1663 DBGC2 ( linda, "Linda %p has serial number \"%s\"\n",
1664 linda, serial );
1665 }
1666
1667 return 0;
1668 }
1669
1670 /***************************************************************************
1671 *
1672 * External parallel bus access
1673 *
1674 ***************************************************************************
1675 */
1676
1677 /**
1678 * Request ownership of the IB external parallel bus
1679 *
1680 * @v linda Linda device
1681 * @ret rc Return status code
1682 */
1683 static int linda_ib_epb_request ( struct linda *linda ) {
1684 struct QIB_7220_ibsd_epb_access_ctrl access;
1685 unsigned int i;
1686
1687 /* Request ownership */
1688 memset ( &access, 0, sizeof ( access ) );
1689 BIT_FILL_1 ( &access, sw_ib_epb_req, 1 );
1690 linda_writeq ( linda, &access, QIB_7220_ibsd_epb_access_ctrl_offset );
1691
1692 /* Wait for ownership to be granted */
1693 for ( i = 0 ; i < LINDA_EPB_REQUEST_MAX_WAIT_US ; i++ ) {
1694 linda_readq ( linda, &access,
1695 QIB_7220_ibsd_epb_access_ctrl_offset );
1696 if ( BIT_GET ( &access, sw_ib_epb_req_granted ) )
1697 return 0;
1698 udelay ( 1 );
1699 }
1700
1701 DBGC ( linda, "Linda %p timed out waiting for IB EPB request\n",
1702 linda );
1703 return -ETIMEDOUT;
1704 }
1705
1706 /**
1707 * Wait for IB external parallel bus transaction to complete
1708 *
1709 * @v linda Linda device
1710 * @v xact Buffer to hold transaction result
1711 * @ret rc Return status code
1712 */
1713 static int linda_ib_epb_wait ( struct linda *linda,
1714 struct QIB_7220_ibsd_epb_transaction_reg *xact ) {
1715 unsigned int i;
1716
1717 /* Discard first read to allow for signals crossing clock domains */
1718 linda_readq ( linda, xact, QIB_7220_ibsd_epb_transaction_reg_offset );
1719
1720 for ( i = 0 ; i < LINDA_EPB_XACT_MAX_WAIT_US ; i++ ) {
1721 linda_readq ( linda, xact,
1722 QIB_7220_ibsd_epb_transaction_reg_offset );
1723 if ( BIT_GET ( xact, ib_epb_rdy ) ) {
1724 if ( BIT_GET ( xact, ib_epb_req_error ) ) {
1725 DBGC ( linda, "Linda %p EPB transaction "
1726 "failed\n", linda );
1727 return -EIO;
1728 } else {
1729 return 0;
1730 }
1731 }
1732 udelay ( 1 );
1733 }
1734
1735 DBGC ( linda, "Linda %p timed out waiting for IB EPB transaction\n",
1736 linda );
1737 return -ETIMEDOUT;
1738 }
1739
1740 /**
1741 * Release ownership of the IB external parallel bus
1742 *
1743 * @v linda Linda device
1744 */
1745 static void linda_ib_epb_release ( struct linda *linda ) {
1746 struct QIB_7220_ibsd_epb_access_ctrl access;
1747
1748 memset ( &access, 0, sizeof ( access ) );
1749 BIT_FILL_1 ( &access, sw_ib_epb_req, 0 );
1750 linda_writeq ( linda, &access, QIB_7220_ibsd_epb_access_ctrl_offset );
1751 }
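
/*
 * A minimal sketch of the calling pattern expected by the accessors
 * below (linda_ib_epb_mod_reg() and linda_ib_epb_ram_xfer() both
 * follow it):
 *
 *	if ( ( rc = linda_ib_epb_request ( linda ) ) != 0 )
 *		goto out;
 *	... linda_ib_epb_read() and/or linda_ib_epb_write() ...
 *	linda_ib_epb_release ( linda );
 */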
1752
1753 /**
1754 * Read data via IB external parallel bus
1755 *
1756 * @v linda Linda device
1757 * @v location EPB location
1758 * @ret data Data read, or negative error
1759 *
1760 * You must have already acquired ownership of the IB external
1761 * parallel bus.
1762 */
1763 static int linda_ib_epb_read ( struct linda *linda, unsigned int location ) {
1764 struct QIB_7220_ibsd_epb_transaction_reg xact;
1765 unsigned int data;
1766 int rc;
1767
1768 /* Ensure no transaction is currently in progress */
1769 if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
1770 return rc;
1771
1772 /* Start the read transaction */
1773 memset ( &xact, 0, sizeof ( xact ) );
1774 BIT_FILL_3 ( &xact,
1775 ib_epb_address, LINDA_EPB_LOC_ADDRESS ( location ),
1776 ib_epb_read_write, LINDA_EPB_READ,
1777 ib_epb_cs, LINDA_EPB_LOC_CS ( location ) );
1778 linda_writeq ( linda, &xact,
1779 QIB_7220_ibsd_epb_transaction_reg_offset );
1780
1781 /* Wait for transaction to complete */
1782 if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
1783 return rc;
1784
1785 data = BIT_GET ( &xact, ib_epb_data );
1786 return data;
1787 }
1788
1789 /**
1790 * Write data via IB external parallel bus
1791 *
1792 * @v linda Linda device
1793 * @v location EPB location
1794 * @v data Data to write
1795 * @ret rc Return status code
1796 *
1797 * You must have already acquired ownership of the IB external
1798 * parallel bus.
1799 */
1800 static int linda_ib_epb_write ( struct linda *linda, unsigned int location,
1801 unsigned int data ) {
1802 struct QIB_7220_ibsd_epb_transaction_reg xact;
1803 int rc;
1804
1805 /* Ensure no transaction is currently in progress */
1806 if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
1807 return rc;
1808
1809 /* Start the write transaction */
1810 memset ( &xact, 0, sizeof ( xact ) );
1811 BIT_FILL_4 ( &xact,
1812 ib_epb_data, data,
1813 ib_epb_address, LINDA_EPB_LOC_ADDRESS ( location ),
1814 ib_epb_read_write, LINDA_EPB_WRITE,
1815 ib_epb_cs, LINDA_EPB_LOC_CS ( location ) );
1816 linda_writeq ( linda, &xact,
1817 QIB_7220_ibsd_epb_transaction_reg_offset );
1818
1819 /* Wait for transaction to complete */
1820 if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
1821 return rc;
1822
1823 return 0;
1824 }
1825
1826 /**
1827 * Read/modify/write EPB register
1828 *
1829 * @v linda Linda device
1830 * @v cs Chip select
1831 * @v channel Channel
1832 * @v element Element
1833 * @v reg Register
1834 * @v value Value to set
1835 * @v mask Mask to apply to old value
1836 * @ret rc Return status code
1837 */
1838 static int linda_ib_epb_mod_reg ( struct linda *linda, unsigned int cs,
1839 unsigned int channel, unsigned int element,
1840 unsigned int reg, unsigned int value,
1841 unsigned int mask ) {
1842 unsigned int location;
1843 int old_value;
1844 int rc;
1845
1846 DBG_DISABLE ( DBGLVL_IO );
1847
1848 /* Sanity check */
1849 assert ( ( value & mask ) == value );
1850
1851 /* Acquire bus ownership */
1852 if ( ( rc = linda_ib_epb_request ( linda ) ) != 0 )
1853 goto out;
1854
1855 /* Read existing value, if necessary */
1856 location = LINDA_EPB_LOC ( cs, channel, element, reg );
1857 if ( (~mask) & 0xff ) {
1858 old_value = linda_ib_epb_read ( linda, location );
1859 if ( old_value < 0 ) {
1860 rc = old_value;
1861 goto out_release;
1862 }
1863 } else {
1864 old_value = 0;
1865 }
1866
1867 /* Update value */
1868 value = ( ( old_value & ~mask ) | value );
1869 DBGCP ( linda, "Linda %p CS %d EPB(%d,%d,%#02x) %#02x => %#02x\n",
1870 linda, cs, channel, element, reg, old_value, value );
1871 if ( ( rc = linda_ib_epb_write ( linda, location, value ) ) != 0 )
1872 goto out_release;
1873
1874 out_release:
1875 /* Release bus */
1876 linda_ib_epb_release ( linda );
1877 out:
1878 DBG_ENABLE ( DBGLVL_IO );
1879 return rc;
1880 }
1881
1882 /**
1883 * Transfer data to/from microcontroller RAM
1884 *
1885 * @v linda Linda device
1886 * @v address Starting address
1887 * @v write Data to write, or NULL
1888 * @v read Data to read, or NULL
1889 * @v len Length of data
1890 * @ret rc Return status code
1891 */
1892 static int linda_ib_epb_ram_xfer ( struct linda *linda, unsigned int address,
1893 const void *write, void *read,
1894 size_t len ) {
1895 unsigned int control;
1896 unsigned int address_hi;
1897 unsigned int address_lo;
1898 int data;
1899 int rc;
1900
1901 DBG_DISABLE ( DBGLVL_IO );
1902
1903 assert ( ! ( write && read ) );
1904 assert ( ( address % LINDA_EPB_UC_CHUNK_SIZE ) == 0 );
1905 assert ( ( len % LINDA_EPB_UC_CHUNK_SIZE ) == 0 );
1906
1907 /* Acquire bus ownership */
1908 if ( ( rc = linda_ib_epb_request ( linda ) ) != 0 )
1909 goto out;
1910
1911 /* Process data */
1912 while ( len ) {
1913
1914 /* Reset the address for each new chunk */
1915 if ( ( address % LINDA_EPB_UC_CHUNK_SIZE ) == 0 ) {
1916
1917 /* Write the control register */
1918 control = ( read ? LINDA_EPB_UC_CTL_READ :
1919 LINDA_EPB_UC_CTL_WRITE );
1920 if ( ( rc = linda_ib_epb_write ( linda,
1921 LINDA_EPB_UC_CTL,
1922 control ) ) != 0 )
1923 break;
1924
1925 /* Write the address registers */
1926 address_hi = ( address >> 8 );
1927 if ( ( rc = linda_ib_epb_write ( linda,
1928 LINDA_EPB_UC_ADDR_HI,
1929 address_hi ) ) != 0 )
1930 break;
1931 address_lo = ( address & 0xff );
1932 if ( ( rc = linda_ib_epb_write ( linda,
1933 LINDA_EPB_UC_ADDR_LO,
1934 address_lo ) ) != 0 )
1935 break;
1936 }
1937
1938 /* Read or write the data */
1939 if ( read ) {
1940 data = linda_ib_epb_read ( linda, LINDA_EPB_UC_DATA );
1941 if ( data < 0 ) {
1942 rc = data;
1943 break;
1944 }
1945 *( ( uint8_t * ) read++ ) = data;
1946 } else {
1947 data = *( ( uint8_t * ) write++ );
1948 if ( ( rc = linda_ib_epb_write ( linda,
1949 LINDA_EPB_UC_DATA,
1950 data ) ) != 0 )
1951 break;
1952 }
1953 address++;
1954 len--;
1955
1956 /* Reset the control byte after each chunk */
1957 if ( ( address % LINDA_EPB_UC_CHUNK_SIZE ) == 0 ) {
1958 if ( ( rc = linda_ib_epb_write ( linda,
1959 LINDA_EPB_UC_CTL,
1960 0 ) ) != 0 )
1961 break;
1962 }
1963 }
1964
1965 /* Release bus */
1966 linda_ib_epb_release ( linda );
1967
1968 out:
1969 DBG_ENABLE ( DBGLVL_IO );
1970 return rc;
1971 }
1972
1973 /***************************************************************************
1974 *
1975 * Infiniband SerDes initialisation
1976 *
1977 ***************************************************************************
1978 */
1979
1980 /** A Linda SerDes parameter */
1981 struct linda_serdes_param {
1982 /** EPB address as constructed by LINDA_EPB_ADDRESS() */
1983 uint16_t address;
1984 /** Value to set */
1985 uint8_t value;
1986 /** Mask to apply to old value */
1987 uint8_t mask;
1988 } __packed;
1989
1990 /** Magic "all channels" channel number */
1991 #define LINDA_EPB_ALL_CHANNELS 31
1992
1993 /** End of SerDes parameter list marker */
1994 #define LINDA_SERDES_PARAM_END { 0, 0, 0 }
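/* (linda_set_serdes_params() detects this terminator by its zero mask) */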
1995
1996 /**
1997 * Program IB SerDes register(s)
1998 *
1999 * @v linda Linda device
2000 * @v param SerDes parameter
2001 * @ret rc Return status code
2002 */
2003 static int linda_set_serdes_param ( struct linda *linda,
2004 struct linda_serdes_param *param ) {
2005 unsigned int channel;
2006 unsigned int channel_start;
2007 unsigned int channel_end;
2008 unsigned int element;
2009 unsigned int reg;
2010 int rc;
2011
2012 /* Break down the EPB address and determine channels */
2013 channel = LINDA_EPB_ADDRESS_CHANNEL ( param->address );
2014 element = LINDA_EPB_ADDRESS_ELEMENT ( param->address );
2015 reg = LINDA_EPB_ADDRESS_REG ( param->address );
2016 if ( channel == LINDA_EPB_ALL_CHANNELS ) {
2017 channel_start = 0;
2018 channel_end = 3;
2019 } else {
2020 channel_start = channel_end = channel;
2021 }
2022
2023 /* Modify register for each specified channel */
2024 for ( channel = channel_start ; channel <= channel_end ; channel++ ) {
2025 if ( ( rc = linda_ib_epb_mod_reg ( linda, LINDA_EPB_CS_SERDES,
2026 channel, element, reg,
2027 param->value,
2028 param->mask ) ) != 0 )
2029 return rc;
2030 }
2031
2032 return 0;
2033 }
2034
2035 /**
2036 * Program IB SerDes registers
2037 *
2038 * @v linda Linda device
2039 * @v params SerDes parameter list (terminated by a zero-mask entry)
2041 * @ret rc Return status code
2042 */
2043 static int linda_set_serdes_params ( struct linda *linda,
2044 struct linda_serdes_param *params ) {
2045 int rc;
2046
2047 for ( ; params->mask != 0 ; params++ ) {
2048 if ( ( rc = linda_set_serdes_param ( linda,
2049 params ) ) != 0 )
2050 return rc;
2051 }
2052
2053 return 0;
2054 }
2055
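/**
 * Generate a block of SerDes transmit "DDS" parameter entries
 *
 * Expands to six linda_serdes_param entries, all targeting element 9
 * of every channel: registers 0x00/0x01 take the amplitude values,
 * 0x09/0x0a the main tap together with the upper pre-cursor bits, and
 * 0x06/0x07 the post-cursor plus the remaining pre-cursor bits, which
 * appear to be the transmit drive/de-emphasis settings.  The paired
 * _d/_s arguments program two alternative settings; the values used
 * below are taken unmodified from the Linux driver.
 */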
2056 #define LINDA_DDS_VAL( amp_d, main_d, ipst_d, ipre_d, \
2057 amp_s, main_s, ipst_s, ipre_s ) \
2058 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x00 ), \
2059 ( ( ( amp_d & 0x1f ) << 1 ) | 1 ), 0xff }, \
2060 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x01 ), \
2061 ( ( ( amp_s & 0x1f ) << 1 ) | 1 ), 0xff }, \
2062 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x09 ), \
2063 ( ( main_d << 3 ) | 4 | ( ipre_d >> 2 ) ), 0xff }, \
2064 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x0a ), \
2065 ( ( main_s << 3 ) | 4 | ( ipre_s >> 2 ) ), 0xff }, \
2066 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x06 ), \
2067 ( ( ( ipst_d & 0xf ) << 1 ) | \
2068 ( ( ipre_d & 3 ) << 6 ) | 0x21 ), 0xff }, \
2069 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x07 ), \
2070 ( ( ( ipst_s & 0xf ) << 1 ) | \
2071 ( ( ipre_s & 3 ) << 6 ) | 0x21 ), 0xff }
2072
2073 /**
2074 * Linda SerDes default parameters
2075 *
2076 * These magic start-of-day values are taken from the Linux driver.
2077 */
2078 static struct linda_serdes_param linda_serdes_defaults1[] = {
2079 /* RXHSCTRL0 */
2080 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x00 ), 0xd4, 0xff },
2081 /* VCDL_DAC2 */
2082 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x05 ), 0x2d, 0xff },
2083 /* VCDL_CTRL2 */
2084 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x08 ), 0x03, 0x0f },
2085 /* START_EQ1 */
2086 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x27 ), 0x10, 0xff },
2087 /* START_EQ2 */
2088 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x28 ), 0x30, 0xff },
2089 /* BACTRL */
2090 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x0e ), 0x40, 0xff },
2091 /* LDOUTCTRL1 */
2092 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x06 ), 0x04, 0xff },
2093 /* RXHSSTATUS */
2094 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x0f ), 0x04, 0xff },
2095 /* End of this block */
2096 LINDA_SERDES_PARAM_END
2097 };
2098 static struct linda_serdes_param linda_serdes_defaults2[] = {
2099 /* LDOUTCTRL1 */
2100 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x06 ), 0x00, 0xff },
2101 /* DDS values */
2102 LINDA_DDS_VAL ( 31, 19, 12, 0, 29, 22, 9, 0 ),
2103 /* Set Rcv Eq. to Preset node */
2104 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x27 ), 0x10, 0xff },
2105 /* DFELTHFDR */
2106 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x08 ), 0x00, 0xff },
2107 /* DFELTHHDR */
2108 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x21 ), 0x00, 0xff },
2109 /* TLTHFDR */
2110 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x09 ), 0x02, 0xff },
2111 /* TLTHHDR */
2112 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x23 ), 0x02, 0xff },
2113 /* ZFR */
2114 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1b ), 0x0c, 0xff },
2115 /* ZCNT */
2116 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1c ), 0x0c, 0xff },
2117 /* GFR */
2118 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1e ), 0x10, 0xff },
2119 /* GHR */
2120 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1f ), 0x10, 0xff },
2121 /* VCDL_CTRL0 toggle */
2122 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x06 ), 0x20, 0xff },
2123 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x06 ), 0x00, 0xff },
2124 /* CMUCTRL5 */
2125 { LINDA_EPB_ADDRESS ( 7, 0, 0x15 ), 0x80, 0xff },
2126 /* End of this block */
2127 LINDA_SERDES_PARAM_END
2128 };
2129 static struct linda_serdes_param linda_serdes_defaults3[] = {
2130 /* START_EQ1 */
2131 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x27 ), 0x00, 0x38 },
2132 /* End of this block */
2133 LINDA_SERDES_PARAM_END
2134 };
2135
2136 /**
2137 * Program the microcontroller RAM
2138 *
2139 * @v linda Linda device
2140 * @ret rc Return status code
2141 */
2142 static int linda_program_uc_ram ( struct linda *linda ) {
2143 int rc;
2144
2145 if ( ( rc = linda_ib_epb_ram_xfer ( linda, 0, linda_ib_fw, NULL,
2146 sizeof ( linda_ib_fw ) ) ) != 0 ){
2147 DBGC ( linda, "Linda %p could not load IB firmware: %s\n",
2148 linda, strerror ( rc ) );
2149 return rc;
2150 }
2151
2152 return 0;
2153 }
2154
2155 /**
2156 * Verify the microcontroller RAM
2157 *
2158 * @v linda Linda device
2159 * @ret rc Return status code
2160 */
2161 static int linda_verify_uc_ram ( struct linda *linda ) {
2162 uint8_t verify[LINDA_EPB_UC_CHUNK_SIZE];
2163 unsigned int offset;
2164 int rc;
2165
2166 for ( offset = 0 ; offset < sizeof ( linda_ib_fw );
2167 offset += sizeof ( verify ) ) {
2168 if ( ( rc = linda_ib_epb_ram_xfer ( linda, offset,
2169 NULL, verify,
2170 sizeof (verify) )) != 0 ){
2171 DBGC ( linda, "Linda %p could not read back IB "
2172 "firmware: %s\n", linda, strerror ( rc ) );
2173 return rc;
2174 }
2175 if ( memcmp ( ( linda_ib_fw + offset ), verify,
2176 sizeof ( verify ) ) != 0 ) {
2177 DBGC ( linda, "Linda %p firmware verification failed "
2178 "at offset %#x\n", linda, offset );
2179 DBGC_HDA ( linda, offset, ( linda_ib_fw + offset ),
2180 sizeof ( verify ) );
2181 DBGC_HDA ( linda, offset, verify, sizeof ( verify ) );
2182 return -EIO;
2183 }
2184 }
2185
2186 DBGC2 ( linda, "Linda %p firmware verified ok\n", linda );
2187 return 0;
2188 }
2189
2190 /**
2191 * Use the microcontroller to trim the IB link
2192 *
2193 * @v linda Linda device
2194 * @ret rc Return status code
2195 */
2196 static int linda_trim_ib ( struct linda *linda ) {
2197 struct QIB_7220_IBSerDesCtrl ctrl;
2198 struct QIB_7220_IntStatus intstatus;
2199 unsigned int i;
2200 int rc;
2201
2202 /* Bring the microcontroller out of reset */
2203 linda_readq ( linda, &ctrl, QIB_7220_IBSerDesCtrl_offset );
2204 BIT_SET ( &ctrl, ResetIB_uC_Core, 0 );
2205 linda_writeq ( linda, &ctrl, QIB_7220_IBSerDesCtrl_offset );
2206
2207 /* Wait for the "trim done" signal */
2208 for ( i = 0 ; i < LINDA_TRIM_DONE_MAX_WAIT_MS ; i++ ) {
2209 linda_readq ( linda, &intstatus, QIB_7220_IntStatus_offset );
2210 if ( BIT_GET ( &intstatus, IBSerdesTrimDone ) ) {
2211 rc = 0;
2212 goto out_reset;
2213 }
2214 mdelay ( 1 );
2215 }
2216
2217 DBGC ( linda, "Linda %p timed out waiting for trim done\n", linda );
2218 rc = -ETIMEDOUT;
2219 out_reset:
2220 /* Put the microcontroller back into reset */
2221 BIT_SET ( &ctrl, ResetIB_uC_Core, 1 );
2222 linda_writeq ( linda, &ctrl, QIB_7220_IBSerDesCtrl_offset );
2223
2224 return rc;
2225 }
2226
2227 /**
2228 * Initialise the IB SerDes
2229 *
2230 * @v linda Linda device
2231 * @ret rc Return status code
2232 */
2233 static int linda_init_ib_serdes ( struct linda *linda ) {
2234 struct QIB_7220_Control control;
2235 struct QIB_7220_IBCCtrl ibcctrl;
2236 struct QIB_7220_IBCDDRCtrl ibcddrctrl;
2237 struct QIB_7220_XGXSCfg xgxscfg;
2238 int rc;
2239
2240 /* Disable link */
2241 linda_readq ( linda, &control, QIB_7220_Control_offset );
2242 BIT_SET ( &control, LinkEn, 0 );
2243 linda_writeq ( linda, &control, QIB_7220_Control_offset );
2244
2245 /* Configure sensible defaults for IBC */
2246 memset ( &ibcctrl, 0, sizeof ( ibcctrl ) );
2247 BIT_FILL_6 ( &ibcctrl, /* Tuning values taken from Linux driver */
2248 FlowCtrlPeriod, 0x03,
2249 FlowCtrlWaterMark, 0x05,
2250 MaxPktLen, ( ( LINDA_RECV_HEADER_SIZE +
2251 LINDA_RECV_PAYLOAD_SIZE +
2252 4 /* ICRC */ ) >> 2 ),
2253 PhyerrThreshold, 0xf,
2254 OverrunThreshold, 0xf,
2255 CreditScale, 0x4 );
2256 linda_writeq ( linda, &ibcctrl, QIB_7220_IBCCtrl_offset );
2257
2258 /* Force SDR only to avoid needing all the DDR tuning,
2259 * Mellanox compatibility hacks etc. SDR is plenty for
2260 * boot-time operation.
2261 */
2262 linda_readq ( linda, &ibcddrctrl, QIB_7220_IBCDDRCtrl_offset );
2263 BIT_SET ( &ibcddrctrl, IB_ENHANCED_MODE, 0 );
2264 BIT_SET ( &ibcddrctrl, SD_SPEED_SDR, 1 );
2265 BIT_SET ( &ibcddrctrl, SD_SPEED_DDR, 0 );
2266 BIT_SET ( &ibcddrctrl, SD_SPEED_QDR, 0 );
2267 BIT_SET ( &ibcddrctrl, HRTBT_ENB, 0 );
2268 BIT_SET ( &ibcddrctrl, HRTBT_AUTO, 0 );
2269 linda_writeq ( linda, &ibcddrctrl, QIB_7220_IBCDDRCtrl_offset );
2270
2271 /* Set default SerDes parameters */
2272 if ( ( rc = linda_set_serdes_params ( linda,
2273 linda_serdes_defaults1 ) ) != 0 )
2274 return rc;
2275 udelay ( 415 ); /* Magic delay while SerDes sorts itself out */
2276 if ( ( rc = linda_set_serdes_params ( linda,
2277 linda_serdes_defaults2 ) ) != 0 )
2278 return rc;
2279
2280 /* Program the microcontroller RAM */
2281 if ( ( rc = linda_program_uc_ram ( linda ) ) != 0 )
2282 return rc;
2283
2284 /* Verify the microcontroller RAM contents */
2285 if ( DBGLVL_LOG ) {
2286 if ( ( rc = linda_verify_uc_ram ( linda ) ) != 0 )
2287 return rc;
2288 }
2289
2290 /* More SerDes tuning */
2291 if ( ( rc = linda_set_serdes_params ( linda,
2292 linda_serdes_defaults3 ) ) != 0 )
2293 return rc;
2294
2295 /* Use the microcontroller to trim the IB link */
2296 if ( ( rc = linda_trim_ib ( linda ) ) != 0 )
2297 return rc;
2298
2299 /* Bring XGXS out of reset */
2300 linda_readq ( linda, &xgxscfg, QIB_7220_XGXSCfg_offset );
2301 BIT_SET ( &xgxscfg, tx_rx_reset, 0 );
2302 BIT_SET ( &xgxscfg, xcv_reset, 0 );
2303 linda_writeq ( linda, &xgxscfg, QIB_7220_XGXSCfg_offset );
2304
2305 return rc;
2306 }
2307
2308 /***************************************************************************
2309 *
2310 * PCI layer interface
2311 *
2312 ***************************************************************************
2313 */
2314
2315 /**
2316 * Probe PCI device
2317 *
2318 * @v pci PCI device
2319 * @v id PCI ID
2320 * @ret rc Return status code
2321 */
2322 static int linda_probe ( struct pci_device *pci,
2323 const struct pci_device_id *id __unused ) {
2324 struct ib_device *ibdev;
2325 struct linda *linda;
2326 struct QIB_7220_Revision revision;
2327 int rc;
2328
2329 /* Allocate Infiniband device */
2330 ibdev = alloc_ibdev ( sizeof ( *linda ) );
2331 if ( ! ibdev ) {
2332 rc = -ENOMEM;
2333 goto err_alloc_ibdev;
2334 }
2335 pci_set_drvdata ( pci, ibdev );
2336 linda = ib_get_drvdata ( ibdev );
2337 ibdev->op = &linda_ib_operations;
2338 ibdev->dev = &pci->dev;
2339 ibdev->port = 1;
2340
2341 /* Fix up PCI device */
2342 adjust_pci_device ( pci );
2343
2344 /* Get PCI BARs */
2345 linda->regs = ioremap ( pci->membase, LINDA_BAR0_SIZE );
2346 DBGC2 ( linda, "Linda %p has BAR at %08lx\n", linda, pci->membase );
2347
2348 /* Print some general data */
2349 linda_readq ( linda, &revision, QIB_7220_Revision_offset );
2350 DBGC2 ( linda, "Linda %p board %02lx v%ld.%ld.%ld.%ld\n", linda,
2351 BIT_GET ( &revision, BoardID ),
2352 BIT_GET ( &revision, R_SW ),
2353 BIT_GET ( &revision, R_Arch ),
2354 BIT_GET ( &revision, R_ChipRevMajor ),
2355 BIT_GET ( &revision, R_ChipRevMinor ) );
2356
2357 /* Record link capabilities. Note that we force SDR only to
2358 * avoid having to carry extra code for DDR tuning etc.
2359 */
2360 ibdev->link_width_enabled = ibdev->link_width_supported =
2361 ( IB_LINK_WIDTH_4X | IB_LINK_WIDTH_1X );
2362 ibdev->link_speed_enabled = ibdev->link_speed_supported =
2363 IB_LINK_SPEED_SDR;
2364
2365 /* Initialise I2C subsystem */
2366 if ( ( rc = linda_init_i2c ( linda ) ) != 0 )
2367 goto err_init_i2c;
2368
2369 /* Read EEPROM parameters */
2370 if ( ( rc = linda_read_eeprom ( linda, &ibdev->gid.u.half[1] ) ) != 0 )
2371 goto err_read_eeprom;
2372
2373 /* Initialise send datapath */
2374 if ( ( rc = linda_init_send ( linda ) ) != 0 )
2375 goto err_init_send;
2376
2377 /* Initialise receive datapath */
2378 if ( ( rc = linda_init_recv ( linda ) ) != 0 )
2379 goto err_init_recv;
2380
2381 /* Initialise the IB SerDes */
2382 if ( ( rc = linda_init_ib_serdes ( linda ) ) != 0 )
2383 goto err_init_ib_serdes;
2384
2385 /* Register Infiniband device */
2386 if ( ( rc = register_ibdev ( ibdev ) ) != 0 ) {
2387 DBGC ( linda, "Linda %p could not register IB "
2388 "device: %s\n", linda, strerror ( rc ) );
2389 goto err_register_ibdev;
2390 }
2391
2392 return 0;
2393
2394 unregister_ibdev ( ibdev );
2395 err_register_ibdev:
2396 err_init_ib_serdes:
2397 linda_fini_recv ( linda );
2398 err_init_recv:
2399 linda_fini_send ( linda );
2400 err_init_send:
2401 err_read_eeprom:
2402 err_init_i2c:
2403 ibdev_put ( ibdev );
2404 err_alloc_ibdev:
2405 return rc;
2406 }
2407
2408 /**
2409 * Remove PCI device
2410 *
2411 * @v pci PCI device
2412 */
2413 static void linda_remove ( struct pci_device *pci ) {
2414 struct ib_device *ibdev = pci_get_drvdata ( pci );
2415 struct linda *linda = ib_get_drvdata ( ibdev );
2416
2417 unregister_ibdev ( ibdev );
2418 linda_fini_recv ( linda );
2419 linda_fini_send ( linda );
2420 ibdev_put ( ibdev );
2421 }
2422
2423 static struct pci_device_id linda_nics[] = {
2424 PCI_ROM ( 0x1077, 0x7220, "iba7220", "QLE7240/7280 HCA driver", 0 ),
2425 };
2426
2427 struct pci_driver linda_driver __pci_driver = {
2428 .ids = linda_nics,
2429 .id_count = ( sizeof ( linda_nics ) / sizeof ( linda_nics[0] ) ),
2430 .probe = linda_probe,
2431 .remove = linda_remove,
2432 };
2433