1 /*
2  * Aic94xx SAS/SATA driver sequencer interface.
3  *
4  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
5  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6  *
7  * Parts of this code adapted from David Chaw's adp94xx_seq.c.
8  *
9  * This file is licensed under GPLv2.
10  *
11  * This file is part of the aic94xx driver.
12  *
13  * The aic94xx driver is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License as
15  * published by the Free Software Foundation; version 2 of the
16  * License.
17  *
18  * The aic94xx driver is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
21  * General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with the aic94xx driver; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
26  *
27  */
28 
29 #include <linux/delay.h>
30 #include <linux/pci.h>
31 #include <linux/module.h>
32 #include <linux/firmware.h>
33 #include "aic94xx_reg.h"
34 #include "aic94xx_hwi.h"
35 
36 #include "aic94xx_seq.h"
37 #include "aic94xx_dump.h"
38 
39 /* It takes no more than 0.05 us for an instruction
40  * to complete. So waiting for 1 us should be more than
41  * plenty.
42  */
43 #define PAUSE_DELAY 1
44 #define PAUSE_TRIES 1000
45 
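/* The state below is parsed out of the sequencer firmware image by
 * asd_request_firmware() and consumed when downloading and initializing
 * the sequencers. */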
46 static const struct firmware *sequencer_fw;
47 static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task,
48 	cseq_idle_loop, lseq_idle_loop;
49 static const u8 *cseq_code, *lseq_code;
50 static u32 cseq_code_size, lseq_code_size;
51 
52 static u16 first_scb_site_no = 0xFFFF;
53 static u16 last_scb_site_no;
54 
55 /* ---------- Pause/Unpause CSEQ/LSEQ ---------- */
56 
57 /**
58  * asd_pause_cseq - pause the central sequencer
59  * @asd_ha: pointer to host adapter structure
60  *
61  * Return 0 on success, negative on failure.
62  */
63 static int asd_pause_cseq(struct asd_ha_struct *asd_ha)
64 {
65 	int	count = PAUSE_TRIES;
66 	u32	arp2ctl;
67 
68 	arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
69 	if (arp2ctl & PAUSED)
70 		return 0;
71 
72 	asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE);
73 	do {
74 		arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
75 		if (arp2ctl & PAUSED)
76 			return 0;
77 		udelay(PAUSE_DELAY);
78 	} while (--count > 0);
79 
80 	ASD_DPRINTK("couldn't pause CSEQ\n");
81 	return -1;
82 }
83 
84 /**
85  * asd_unpause_cseq - unpause the central sequencer.
86  * @asd_ha: pointer to host adapter structure.
87  *
88  * Return 0 on success, negative on error.
89  */
90 static int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
91 {
92 	u32	arp2ctl;
93 	int	count = PAUSE_TRIES;
94 
95 	arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
96 	if (!(arp2ctl & PAUSED))
97 		return 0;
98 
99 	asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE);
100 	do {
101 		arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
102 		if (!(arp2ctl & PAUSED))
103 			return 0;
104 		udelay(PAUSE_DELAY);
105 	} while (--count > 0);
106 
107 	ASD_DPRINTK("couldn't unpause the CSEQ\n");
108 	return -1;
109 }
110 
111 /**
112  * asd_seq_pause_lseq - pause a link sequencer
113  * @asd_ha: pointer to a host adapter structure
114  * @lseq: link sequencer of interest
115  *
116  * Return 0 on success, negative on error.
117  */
118 static int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
119 {
120 	u32    arp2ctl;
121 	int    count = PAUSE_TRIES;
122 
123 	arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
124 	if (arp2ctl & PAUSED)
125 		return 0;
126 
127 	asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE);
128 	do {
129 		arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
130 		if (arp2ctl & PAUSED)
131 			return 0;
132 		udelay(PAUSE_DELAY);
133 	} while (--count > 0);
134 
135 	ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq);
136 	return -1;
137 }
138 
139 /**
140  * asd_pause_lseq - pause the link sequencer(s)
141  * @asd_ha: pointer to host adapter structure
142  * @lseq_mask: mask of link sequencers of interest
143  *
144  * Return 0 on success, negative on failure.
145  */
146 static int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
147 {
148 	int lseq;
149 	int err = 0;
150 
151 	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
152 		err = asd_seq_pause_lseq(asd_ha, lseq);
153 		if (err)
154 			return err;
155 	}
156 
157 	return err;
158 }
159 
160 /**
161  * asd_seq_unpause_lseq - unpause a link sequencer
162  * @asd_ha: pointer to host adapter structure
163  * @lseq: link sequencer of interest
164  *
165  * Return 0; a failure to unpause is logged but not reported to the caller.
166  */
167 static int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
168 {
169 	u32 arp2ctl;
170 	int count = PAUSE_TRIES;
171 
172 	arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
173 	if (!(arp2ctl & PAUSED))
174 		return 0;
175 
176 	asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE);
177 	do {
178 		arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
179 		if (!(arp2ctl & PAUSED))
180 			return 0;
181 		udelay(PAUSE_DELAY);
182 	} while (--count > 0);
183 
184 	ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq);
185 	return 0;
186 }
187 
188 
189 /* ---------- Downloading CSEQ/LSEQ microcode ---------- */
190 
191 static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
192 			   u32 size)
193 {
194 	u32 addr = CSEQ_RAM_REG_BASE_ADR;
195 	const u32 *prog = (u32 *) _prog;
196 	u32 i;
197 
198 	for (i = 0; i < size; i += 4, prog++, addr += 4) {
199 		u32 val = asd_read_reg_dword(asd_ha, addr);
200 
201 		if (le32_to_cpu(*prog) != val) {
202 			asd_printk("%s: cseq verify failed at %u "
203 				   "read:0x%x, wanted:0x%x\n",
204 				   pci_name(asd_ha->pcidev),
205 				   i, val, le32_to_cpu(*prog));
206 			return -1;
207 		}
208 	}
209 	ASD_DPRINTK("verified %d bytes, passed\n", size);
210 	return 0;
211 }
212 
213 /**
214  * asd_verify_lseq - verify the microcode of a link sequencer
215  * @asd_ha: pointer to host adapter structure
216  * @_prog: pointer to the microcode
217  * @size: size of the microcode in bytes
218  * @lseq: link sequencer of interest
219  *
220  * The link sequencer code is accessed in 4 KB pages, which are selected
221  * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register.
222  * The 10 KB LSEQm instruction code is mapped, page at a time, at
223  * LmSEQRAM address.
224  */
225 static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
226 			   u32 size, int lseq)
227 {
228 #define LSEQ_CODEPAGE_SIZE 4096
229 	int pages =  (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE;
230 	u32 page;
231 	const u32 *prog = (u32 *) _prog;
232 
233 	for (page = 0; page < pages; page++) {
234 		u32 i;
235 
236 		asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq),
237 				    page << LmRAMPAGE_LSHIFT);
238 		for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE;
239 		     i += 4, prog++, size-=4) {
240 
241 			u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i);
242 
243 			if (le32_to_cpu(*prog) != val) {
244 				asd_printk("%s: LSEQ%d verify failed "
245 					   "page:%d, offs:%d\n",
246 					   pci_name(asd_ha->pcidev),
247 					   lseq, page, i);
248 				return -1;
249 			}
250 		}
251 	}
252 	ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq,
253 		    (int)((u8 *)prog-_prog));
254 	return 0;
255 }
256 
257 /**
258  * asd_verify_seq -- verify CSEQ/LSEQ microcode
259  * @asd_ha: pointer to host adapter structure
260  * @prog: pointer to microcode
261  * @size: size of the microcode
262  * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest
263  *
264  * Return 0 if microcode is correct, negative on mismatch.
265  */
266 static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog,
267 			      u32 size, u8 lseq_mask)
268 {
269 	if (lseq_mask == 0)
270 		return asd_verify_cseq(asd_ha, prog, size);
271 	else {
272 		int lseq, err;
273 
274 		for_each_sequencer(lseq_mask, lseq_mask, lseq) {
275 			err = asd_verify_lseq(asd_ha, prog, size, lseq);
276 			if (err)
277 				return err;
278 		}
279 	}
280 
281 	return 0;
282 }
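/* With ASD_DMA_MODE_DOWNLOAD defined, the microcode is downloaded to the
 * sequencers via overlay DMA; the #else branch below keeps an alternative
 * PIO-mode implementation of asd_download_seq(). */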
283 #define ASD_DMA_MODE_DOWNLOAD
284 #ifdef ASD_DMA_MODE_DOWNLOAD
285 /* This is the size of the CSEQ Mapped instruction page */
286 #define MAX_DMA_OVLY_COUNT ((1U << 14)-1)
287 static int asd_download_seq(struct asd_ha_struct *asd_ha,
288 			    const u8 * const prog, u32 size, u8 lseq_mask)
289 {
290 	u32 comstaten;
291 	u32 reg;
292 	int page;
293 	const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT;
294 	struct asd_dma_tok *token;
295 	int err = 0;
296 
297 	if (size % 4) {
298 		asd_printk("sequencer program not multiple of 4\n");
299 		return -1;
300 	}
301 
302 	asd_pause_cseq(asd_ha);
303 	asd_pause_lseq(asd_ha, 0xFF);
304 
305 	/* save, disable and clear interrupts */
306 	comstaten = asd_read_reg_dword(asd_ha, COMSTATEN);
307 	asd_write_reg_dword(asd_ha, COMSTATEN, 0);
308 	asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK);
309 
310 	asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
311 	asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK);
312 
313 	token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL);
314 	if (!token) {
315 		asd_printk("out of memory for dma SEQ download\n");
316 		err = -ENOMEM;
317 		goto out;
318 	}
319 	ASD_DPRINTK("dma-ing %d bytes\n", size);
320 
321 	for (page = 0; page < pages; page++) {
322 		int i;
323 		u32 left = min(size-page*MAX_DMA_OVLY_COUNT,
324 			       (u32)MAX_DMA_OVLY_COUNT);
325 
326 		memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
327 		asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle);
328 		asd_write_reg_dword(asd_ha, OVLYDMACNT, left);
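		/* Build the overlay DMA control word: reset the DMA engine on
		 * the first page only, halt on error, and select the download
		 * target -- the LSEQs named in lseq_mask (shifted into bits
		 * 15:8) or the CSEQ when lseq_mask is zero. */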
329 		reg = !page ? RESETOVLYDMA : 0;
330 		reg |= (STARTOVLYDMA | OVLYHALTERR);
331 		reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
332 		/* Start DMA. */
333 		asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
334 
335 		for (i = PAUSE_TRIES*100; i > 0; i--) {
336 			u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL);
337 			if (!(dmadone & OVLYDMAACT))
338 				break;
339 			udelay(PAUSE_DELAY);
340 		}
341 	}
342 
343 	reg = asd_read_reg_dword(asd_ha, COMSTAT);
344 	if (!(reg & OVLYDMADONE) || (reg & OVLYERR)
345 	    || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){
346 		asd_printk("%s: error DMA-ing sequencer code\n",
347 			   pci_name(asd_ha->pcidev));
348 		err = -ENODEV;
349 	}
350 
351 	asd_free_coherent(asd_ha, token);
352  out:
353 	asd_write_reg_dword(asd_ha, COMSTATEN, comstaten);
354 
355 	return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask);
356 }
357 #else /* ASD_DMA_MODE_DOWNLOAD */
358 static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog,
359 			    u32 size, u8 lseq_mask)
360 {
361 	int i;
362 	u32 reg = 0;
363 	const u32 *prog = (u32 *) _prog;
364 
365 	if (size % 4) {
366 		asd_printk("sequencer program not multiple of 4\n");
367 		return -1;
368 	}
369 
370 	asd_pause_cseq(asd_ha);
371 	asd_pause_lseq(asd_ha, 0xFF);
372 
373 	reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
374 	reg |= PIOCMODE;
375 
376 	asd_write_reg_dword(asd_ha, OVLYDMACNT, size);
377 	asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
378 
379 	ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n",
380 		    lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : "");
381 
382 	for (i = 0; i < size; i += 4, prog++)
383 		asd_write_reg_dword(asd_ha, SPIODATA, *prog);
384 
385 	reg = (reg & ~PIOCMODE) | OVLYHALTERR;
386 	asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
387 
388 	return asd_verify_seq(asd_ha, _prog, size, lseq_mask);
389 }
390 #endif /* ASD_DMA_MODE_DOWNLOAD */
391 
392 /**
393  * asd_seq_download_seqs - download the sequencer microcode
394  * @asd_ha: pointer to host adapter structure
395  *
396  * Download the central and link sequencer microcode.
397  */
398 static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha)
399 {
400 	int 	err;
401 
402 	if (!asd_ha->hw_prof.enabled_phys) {
403 		asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev));
404 		return -ENODEV;
405 	}
406 
407 	/* Download the CSEQ */
408 	ASD_DPRINTK("downloading CSEQ...\n");
409 	err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0);
410 	if (err) {
411 		asd_printk("CSEQ download failed:%d\n", err);
412 		return err;
413 	}
414 
415 	/* Download the Link Sequencers' code. All of the Link Sequencers'
416 	 * microcode can be downloaded at the same time.
417 	 */
418 	ASD_DPRINTK("downloading LSEQs...\n");
419 	err = asd_download_seq(asd_ha, lseq_code, lseq_code_size,
420 			       asd_ha->hw_prof.enabled_phys);
421 	if (err) {
422 		/* Try it one at a time */
423 		u8 lseq;
424 		u8 lseq_mask = asd_ha->hw_prof.enabled_phys;
425 
426 		for_each_sequencer(lseq_mask, lseq_mask, lseq) {
427 			err = asd_download_seq(asd_ha, lseq_code,
428 					       lseq_code_size, 1<<lseq);
429 			if (err)
430 				break;
431 		}
432 	}
433 	if (err)
434 		asd_printk("LSEQs download failed:%d\n", err);
435 
436 	return err;
437 }
438 
439 /* ---------- Initializing the chip, chip memory, etc. ---------- */
440 
441 /**
442  * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7
443  * @asd_ha: pointer to host adapter structure
444  */
445 static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha)
446 {
447 	/* CSEQ Mode Independent, page 4 setup. */
448 	asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF);
449 	asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF);
450 	asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF);
451 	asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF);
452 	asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF);
453 	asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF);
454 	asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF);
455 	asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF);
456 	asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF);
457 	asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF);
458 	asd_write_reg_word(asd_ha, CSEQ_REG0, 0);
459 	asd_write_reg_word(asd_ha, CSEQ_REG1, 0);
460 	asd_write_reg_dword(asd_ha, CSEQ_REG2, 0);
461 	asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0);
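	/* CSEQ_MAX_CSEQ_MODE gets the number of connections that exist
	 * (i.e. the number of bits set in CCONEXIST) packed into both
	 * nibbles of the byte. */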
462 	{
463 		u8 con = asd_read_reg_byte(asd_ha, CCONEXIST);
464 		u8 val = hweight8(con);
465 		asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val);
466 	}
467 	asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0);
468 
469 	/* CSEQ Mode independent, page 5 setup. */
470 	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0);
471 	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0);
472 	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0);
473 	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0);
474 	asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF);
475 	asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF);
476 	asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0);
477 	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0);
478 	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0);
479 	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0);
480 
481 	/* CSEQ Mode independent, page 6 setup. */
482 	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0);
483 	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0);
484 	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0);
485 	asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0);
486 	asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0);
487 	asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0);
488 	asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0);
489 	asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF);
490 	asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF);
491 	/* Calculate the free scb mask. */
492 	{
493 		u16 cmdctx = asd_get_cmdctx_size(asd_ha);
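		/* Each SCB site occupies 128 bytes of command context memory,
		 * so cmdctx/128 is the number of SCB sites available. */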
494 		cmdctx = (~((cmdctx/128)-1)) >> 8;
495 		asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx);
496 	}
497 	asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD,
498 			   first_scb_site_no);
499 	asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL,
500 			   last_scb_site_no);
501 	asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF);
502 	asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF);
503 
504 	/* CSEQ Mode independent, page 7 setup. */
505 	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0);
506 	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0);
507 	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0);
508 	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0);
509 	asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF);
510 	asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF);
511 	asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0);
512 	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0);
513 	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0);
514 	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0);
515 	asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0);
516 	asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0);
517 }
518 
519 /**
520  * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages
521  * @asd_ha: pointer to host adapter structure
522  */
523 static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha)
524 {
525 	int	i;
526 	int	moffs;
527 
528 	moffs = CSEQ_PAGE_SIZE * 2;
529 
530 	/* CSEQ Mode dependent, modes 0-7, page 0 setup. */
531 	for (i = 0; i < 8; i++) {
532 		asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0);
533 		asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0);
534 		asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF);
535 		asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF);
536 		asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0);
537 	}
538 
539 	/* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */
540 
541 	/* CSEQ Mode dependent, mode 8, page 0 setup. */
542 	asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF);
543 	asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0);
544 	asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0);
545 	asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0);
546 	asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0);
547 	asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0);
548 	asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0);
549 	asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0);
550 	asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0);
551 	asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0);
552 	asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0);
553 	asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0);
554 	asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE,
555 			   (u16)last_scb_site_no+1);
556 	asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE,
557 			   (u16)asd_ha->hw_prof.max_ddbs);
558 
559 	/* CSEQ Mode dependent, mode 8, page 1 setup. */
560 	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0);
561 	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0);
562 	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0);
563 	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0);
564 
565 	/* CSEQ Mode dependent, mode 8, page 2 setup. */
566 	/* Tell the sequencer the bus address of the first SCB. */
567 	asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER,
568 			   asd_ha->seq.next_scb.dma_handle);
569 	ASD_DPRINTK("First SCB dma_handle: 0x%llx\n",
570 		    (unsigned long long)asd_ha->seq.next_scb.dma_handle);
571 
572 	/* Tell the sequencer the first Done List entry address. */
573 	asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE,
574 			   asd_ha->seq.actual_dl->dma_handle);
575 
576 	/* Initialize the Q_DONE_POINTER with the least significant
577 	 * 4 bytes of the first Done List address. */
578 	asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER,
579 			    ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle));
580 
581 	asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE);
582 
583 	/* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */
584 }
585 
586 /**
587  * asd_init_cseq_scratch -- setup and init CSEQ
588  * @asd_ha: pointer to host adapter structure
589  *
590  * Setup and initialize the central sequencer.  Initialize the mode
591  * independent and mode dependent scratch pages to the default settings.
592  */
593 static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha)
594 {
595 	asd_init_cseq_mip(asd_ha);
596 	asd_init_cseq_mdp(asd_ha);
597 }
598 
599 /**
600  * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3
601  * @asd_ha: pointer to host adapter structure
602  */
603 static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
604 {
605 	int i;
606 
607 	/* LSEQ Mode independent page 0 setup. */
608 	asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF);
609 	asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF);
610 	asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq);
611 	asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq),
612 			   ASD_NOTIFY_ENABLE_SPINUP);
613 	asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000);
614 	asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0);
615 	asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0);
616 	asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0);
617 	asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0);
618 	asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0);
619 	asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0);
620 	asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0);
621 	asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0);
622 
623 	/* LSEQ Mode independent page 1 setup. */
624 	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF);
625 	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF);
626 	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF);
627 	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF);
628 	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0);
629 	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0);
630 	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0);
631 	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0);
632 	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0);
633 	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0);
634 	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0);
635 	asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0);
636 	asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0);
637 	asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0);
638 
639 	/* LSEQ Mode Independent page 2 setup. */
640 	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF);
641 	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF);
642 	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF);
643 	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF);
644 	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0);
645 	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0);
646 	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0);
647 	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0);
648 	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0);
649 	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0);
650 	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0);
651 	for (i = 0; i < 12; i += 4)
652 		asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0);
653 
654 	/* LSEQ Mode Independent page 3 setup. */
655 
656 	/* Device present timer timeout */
657 	asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq),
658 			    ASD_DEV_PRESENT_TIMEOUT);
659 
660 	/* SATA interlock timer disabled */
661 	asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq),
662 			    ASD_SATA_INTERLOCK_TIMEOUT);
663 
664 	/* STP shutdown timer timeout constant, IGNORED by the sequencer,
665 	 * always 0. */
666 	asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq),
667 			    ASD_STP_SHUTDOWN_TIMEOUT);
668 
669 	asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq),
670 			    ASD_SRST_ASSERT_TIMEOUT);
671 
672 	asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq),
673 			    ASD_RCV_FIS_TIMEOUT);
674 
675 	asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq),
676 			    ASD_ONE_MILLISEC_TIMEOUT);
677 
678 	/* COM_INIT timer */
679 	asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq),
680 			    ASD_TEN_MILLISEC_TIMEOUT);
681 
682 	asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq),
683 			    ASD_SMP_RCV_TIMEOUT);
684 }
685 
686 /**
687  * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages.
688  * @asd_ha: pointer to host adapter structure
689  */
690 static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
691 {
692 	int    i;
693 	u32    moffs;
694 	u16 ret_addr[] = {
695 		0xFFFF,		  /* mode 0 */
696 		0xFFFF,		  /* mode 1 */
697 		mode2_task,	  /* mode 2 */
698 		0,
699 		0xFFFF,		  /* mode 4/5 */
700 		0xFFFF,		  /* mode 4/5 */
701 	};
702 
703 	/*
704 	 * Modes 0, 1, 2 and 4/5 share a common field layout on page 0
705 	 * for the first 14 bytes.
706 	 */
707 	for (i = 0; i < 3; i++) {
708 		moffs = i * LSEQ_MODE_SCRATCH_SIZE;
709 		asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs,
710 				   ret_addr[i]);
711 		asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0);
712 		asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0);
713 		asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF);
714 		asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF);
715 		asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0);
716 		asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0);
717 	}
718 	/*
719 	 *  Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3.
720 	 */
721 	asd_write_reg_word(asd_ha,
722 			 LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET,
723 			   ret_addr[5]);
724 	asd_write_reg_word(asd_ha,
725 			 LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
726 	asd_write_reg_word(asd_ha,
727 			 LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
728 	asd_write_reg_word(asd_ha,
729 			 LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
730 	asd_write_reg_word(asd_ha,
731 			 LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
732 	asd_write_reg_byte(asd_ha,
733 		         LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
734 	asd_write_reg_word(asd_ha,
735 		         LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
736 
737 	/* LSEQ Mode dependent 0, page 0 setup. */
738 	asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq),
739 			   (u16)asd_ha->hw_prof.max_ddbs);
740 	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0);
741 	asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0);
742 	asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq),
743 			   (u16)last_scb_site_no+1);
744 	asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq),
745 			    (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16));
746 	asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2,
747 			    (u16) LmM0INTEN_MASK & 0xFFFF);
748 	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0);
749 	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0);
750 	asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0);
751 	asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0);
752 	asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0);
753 
754 	/* LSEQ mode dependent, mode 1, page 0 setup. */
755 	asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF);
756 	asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0);
757 	asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0);
758 	asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0);
759 	asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0);
760 	asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0);
761 	asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0);
762 	asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0);
763 
764 	/* LSEQ Mode dependent mode 2, page 0 setup */
765 	asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0);
766 	asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0);
767 	asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0);
768 	asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0);
769 	asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0);
770 	asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0);
771 
772 	/* LSEQ Mode dependent, mode 4/5, page 0 setup. */
773 	asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0);
774 	asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0);
775 	asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF);
776 	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0);
777 	asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0);
778 	asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0);
779 	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0);
780 	asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0);
781 	asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0);
782 	/*
783 	 * Set the desired interval between transmissions of the NOTIFY
784 	 * (ENABLE SPINUP) primitive.  Must be initialized to val - 1.
785 	 */
786 	asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq),
787 			   ASD_NOTIFY_TIMEOUT - 1);
788 	/* No delay for the first NOTIFY to be sent to the attached target. */
789 	asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq),
790 			   ASD_NOTIFY_DOWN_COUNT);
791 	asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(lseq),
792 			   ASD_NOTIFY_DOWN_COUNT);
793 
794 	/* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */
795 	for (i = 0; i < 2; i++)	{
796 		int j;
797 		/* Start from Page 1 of Mode 0 and 1. */
798 		moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
799 		/* All the fields of page 1 can be initialized to 0. */
800 		for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
801 			asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
802 	}
803 
804 	/* LSEQ Mode dependent, mode 2, page 1 setup. */
805 	asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0);
806 	asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0);
807 	asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0);
808 
809 	/* LSEQ Mode dependent, mode 4/5, page 1. */
810 	for (i = 0; i < LSEQ_PAGE_SIZE; i+=4)
811 		asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0);
812 	asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF);
813 	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF);
814 	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF);
815 	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF);
816 	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF);
817 	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF);
818 	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF);
819 	asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF);
820 
821 	/* LSEQ Mode dependent, mode 0, page 2 setup. */
822 	asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0);
823 	asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0);
824 	asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0);
825 	asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0);
826 	asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0);
827 
828 	/* LSEQ Mode Dependent 1, page 2 setup. */
829 	asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0);
830 	asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0);
831 	asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0);
832 	asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0);
833 	asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0);
834 
835 	/* LSEQ Mode Dependent 2, page 2 setup. */
836 	/* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer,
837 	 * i.e. always 0. */
838 	asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0);
839 	asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0);
840 	asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0);
841 	asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0);
842 	asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0);
843 	asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0);
844 
845 	/* LSEQ Mode Dependent 4/5, page 2 setup. */
846 	asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0);
847 	asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0);
848 	asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0);
849 	asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq),	0);
850 }
851 
852 /**
853  * asd_init_lseq_scratch -- setup and init link sequencers
854  * @asd_ha: pointer to host adapter struct
855  */
856 static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha)
857 {
858 	u8 lseq;
859 	u8 lseq_mask;
860 
861 	lseq_mask = asd_ha->hw_prof.enabled_phys;
862 	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
863 		asd_init_lseq_mip(asd_ha, lseq);
864 		asd_init_lseq_mdp(asd_ha, lseq);
865 	}
866 }
867 
868 /**
869  * asd_init_scb_sites -- initialize sequencer SCB sites (memory).
870  * @asd_ha: pointer to host adapter structure
871  *
872  * This should be done before initializing common CSEQ and LSEQ
873  * scratch memory, since those areas depend on values computed here:
874  * first_scb_site_no, last_scb_site_no, etc.
875  */
876 static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
877 {
878 	u16	site_no;
879 	u16     max_scbs = 0;
880 
881 	for (site_no = asd_ha->hw_prof.max_scbs-1;
882 	     site_no != (u16) -1;
883 	     site_no--) {
884 		u16	i;
885 
886 		/* Initialize all fields in the SCB site to 0. */
887 		for (i = 0; i < ASD_SCB_SIZE; i += 4)
888 			asd_scbsite_write_dword(asd_ha, site_no, i, 0);
889 
890 		/* Initialize SCB Site Opcode field to invalid. */
891 		asd_scbsite_write_byte(asd_ha, site_no,
892 				       offsetof(struct scb_header, opcode),
893 				       0xFF);
894 
895 		/* Initialize SCB Site Flags field to mean a response
896 		 * frame has been received.  This causes inadvertently
897 		 * received frames to be dropped. */
898 		asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);
899 
900 		/* Workaround needed by SEQ to fix a SATA issue is to exclude
901 		 * certain SCB sites from the free list. */
902 		if (!SCB_SITE_VALID(site_no))
903 			continue;
904 
905 		if (last_scb_site_no == 0)
906 			last_scb_site_no = site_no;
907 
908 		/* For every SCB site, we need to initialize the
909 		 * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS,
910 		 * and SG Element Flag. */
911 
912 		/* Q_NEXT field of the last SCB is invalidated. */
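		/* Writing first_scb_site_no here chains this site to the one
		 * processed before it (a higher site number); the highest
		 * valid site keeps the initial 0xFFFF terminator, so the
		 * built-in free list runs from the lowest valid site (head)
		 * to the highest (tail). */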
913 		asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);
914 
915 		first_scb_site_no = site_no;
916 		max_scbs++;
917 	}
918 	asd_ha->hw_prof.max_scbs = max_scbs;
919 	ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs);
920 	ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no);
921 	ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no);
922 }
923 
924 /**
925  * asd_init_cseq_cio - initialize CSEQ CIO registers
926  * @asd_ha: pointer to host adapter structure
927  */
928 static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
929 {
930 	int i;
931 
932 	asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0);
933 	asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS);
934 	asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0);
935 	asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0);
936 	asd_ha->seq.scbpro = 0;
937 	asd_write_reg_dword(asd_ha, SCBPRO, 0);
938 	asd_write_reg_dword(asd_ha, CSEQCON, 0);
939 
940 	/* Initialize CSEQ Mode 11 Interrupt Vectors.
941 	 * The addresses are 16 bit wide and in dword units.
942 	 * The values of their macros are in byte units.
943 	 * Thus we have to divide by 4. */
944 	asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]);
945 	asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]);
946 	asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]);
947 
948 	/* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
949 	asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC);
950 
951 	/* Initialize CSEQ Scratch Page to 0x04. */
952 	asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04);
953 
954 	/* Initialize CSEQ Mode[0-8] Dependent registers. */
955 	/* Initialize Scratch Page to 0. */
956 	for (i = 0; i < 9; i++)
957 		asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0);
958 
959 	/* Reset the ARP2 Program Count. */
960 	asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
961 
962 	for (i = 0; i < 8; i++) {
963 		/* Initialize Mode n Link m Interrupt Enable. */
964 		asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
965 		/* Initialize Mode n Request Mailbox. */
966 		asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
967 	}
968 }
969 
970 /**
971  * asd_init_lseq_cio -- initialize LmSEQ CIO registers
972  * @asd_ha: pointer to host adapter structure
973  */
974 static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq)
975 {
976 	u8  *sas_addr;
977 	int  i;
978 
979 	/* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
980 	asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC);
981 
982 	asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0);
983 
984 	/* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */
985 	for (i = 0; i < 3; i++)
986 		asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0);
987 
988 	/* Initialize Mode 5 SCRATCHPAGE to 0. */
989 	asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0);
990 
991 	asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0);
992 	/* Initialize Mode 0,1,2 and 5 Interrupt Enable and
993 	 * Interrupt registers. */
994 	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK);
995 	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF);
996 	/* Mode 1 */
997 	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK);
998 	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF);
999 	/* Mode 2 */
1000 	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK);
1001 	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF);
1002 	/* Mode 5 */
1003 	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK);
1004 	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF);
1005 
1006 	/* Enable HW Timer status. */
1007 	asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK);
1008 
1009 	/* Enable Primitive Status 0 and 1. */
1010 	asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK);
1011 	asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK);
1012 
1013 	/* Enable Frame Error. */
1014 	asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK);
1015 	asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50);
1016 
1017 	/* Initialize Mode 0 Transfer Level to 512. */
1018 	asd_write_reg_byte(asd_ha,  LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512);
1019 	/* Initialize Mode 1 Transfer Level to 256. */
1020 	asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256);
1021 
1022 	/* Initialize Program Count. */
1023 	asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
1024 
1025 	/* Enable Blind SG Move. */
1026 	asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48);
1027 	asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq),
1028 			   ASD_SATA_INTERLOCK_TIMEOUT);
1029 
1030 	(void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq));
1031 
1032 	/* Clear Primitive Status 0 and 1. */
1033 	asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF);
1034 	asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF);
1035 
1036 	/* Clear HW Timer status. */
1037 	asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF);
1038 
1039 	/* Clear DMA Errors for Mode 0 and 1. */
1040 	asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF);
1041 	asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF);
1042 
1043 	/* Clear SG DMA Errors for Mode 0 and 1. */
1044 	asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF);
1045 	asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF);
1046 
1047 	/* Clear Mode 0 Buffer Parity Error. */
1048 	asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR);
1049 
1050 	/* Clear Mode 0 Frame Error register. */
1051 	asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF);
1052 
1053 	/* Reset LSEQ external interrupt arbiter. */
1054 	asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL);
1055 
1056 	/* Set the phy's SAS address into the LmSEQ WWN registers. */
1057 	sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr;
1058 	for (i = 0; i < SAS_ADDR_SIZE; i++)
1059 		asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]);
1060 
1061 	/* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */
1062 	asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0);
1063 
1064 	/* Set the Bus Inactivity Time Limit Timer. */
1065 	asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9);
1066 
1067 	/* Enable SATA Port Multiplier. */
1068 	asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80);
1069 
1070 	/* Initialize Interrupt Vector[0-10] address in Mode 3.
1071 	 * See the comment on CSEQ_INT_* */
1072 	asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]);
1073 	asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]);
1074 	asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]);
1075 	asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]);
1076 	asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]);
1077 	asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]);
1078 	asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]);
1079 	asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]);
1080 	asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]);
1081 	asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]);
1082 	asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]);
1083 	/*
1084 	 * Program the Link LED control, applicable only for
1085 	 * Chip Rev. B or later.
1086 	 */
1087 	asd_write_reg_dword(asd_ha, LmCONTROL(lseq),
1088 			    (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms));
1089 
1090 	/* Set the Align Rate for SAS and STP mode. */
1091 	asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT);
1092 	asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT);
1093 }
1094 
1095 
1096 /**
1097  * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox
1098  * @asd_ha: pointer to host adapter struct
1099  */
1100 static void asd_post_init_cseq(struct asd_ha_struct *asd_ha)
1101 {
1102 	int i;
1103 
1104 	for (i = 0; i < 8; i++)
1105 		asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF);
1106 	for (i = 0; i < 8; i++)
1107 		asd_read_reg_dword(asd_ha, CMnRSPMBX(i));
1108 	/* Reset the external interrupt arbiter. */
1109 	asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL);
1110 }
1111 
1112 /**
1113  * asd_init_ddb_0 -- initialize DDB 0
1114  * @asd_ha: pointer to host adapter structure
1115  *
1116  * Initialize DDB site 0 which is used internally by the sequencer.
1117  */
1118 static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
1119 {
1120 	int	i;
1121 
1122 	/* Zero out the DDB explicitly */
1123 	for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4)
1124 		asd_ddbsite_write_dword(asd_ha, 0, i, 0);
1125 
1126 	asd_ddbsite_write_word(asd_ha, 0,
1127 		 offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0);
1128 	asd_ddbsite_write_word(asd_ha, 0,
1129 		 offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail),
1130 			       asd_ha->hw_prof.max_ddbs-1);
1131 	asd_ddbsite_write_word(asd_ha, 0,
1132 		 offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0);
1133 	asd_ddbsite_write_word(asd_ha, 0,
1134 		 offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF);
1135 	asd_ddbsite_write_word(asd_ha, 0,
1136 		 offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF);
1137 	asd_ddbsite_write_word(asd_ha, 0,
1138 		 offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0);
1139 	asd_ddbsite_write_word(asd_ha, 0,
1140 		 offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0);
1141 	asd_ddbsite_write_word(asd_ha, 0,
1142 		 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0);
1143 	asd_ddbsite_write_word(asd_ha, 0,
1144 		 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh),
1145 			       asd_ha->hw_prof.num_phys * 2);
1146 	asd_ddbsite_write_byte(asd_ha, 0,
1147 		 offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0);
1148 	asd_ddbsite_write_byte(asd_ha, 0,
1149 	       offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF);
1150 	asd_ddbsite_write_byte(asd_ha, 0,
1151 	       offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00);
1152 	/* DDB 0 is reserved */
1153 	set_bit(0, asd_ha->hw_prof.ddb_bitmap);
1154 }
1155 
1156 static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha)
1157 {
1158 	unsigned int i;
1159 	unsigned int ddb_site;
1160 
1161 	for (ddb_site = 0 ; ddb_site < ASD_MAX_DDBS; ddb_site++)
1162 		for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4)
1163 			asd_ddbsite_write_dword(asd_ha, ddb_site, i, 0);
1164 }
1165 
1166 /**
1167  * asd_seq_setup_seqs -- setup and initialize central and link sequencers
1168  * @asd_ha: pointer to host adapter structure
1169  */
1170 static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
1171 {
1172 	int 		lseq;
1173 	u8		lseq_mask;
1174 
1175 	/* Initialize DDB sites */
1176 	asd_seq_init_ddb_sites(asd_ha);
1177 
1178 	/* Initialize SCB sites. Done first to compute some values which
1179 	 * the rest of the init code depends on. */
1180 	asd_init_scb_sites(asd_ha);
1181 
1182 	/* Initialize CSEQ Scratch RAM registers. */
1183 	asd_init_cseq_scratch(asd_ha);
1184 
1185 	/* Initialize LmSEQ Scratch RAM registers. */
1186 	asd_init_lseq_scratch(asd_ha);
1187 
1188 	/* Initialize CSEQ CIO registers. */
1189 	asd_init_cseq_cio(asd_ha);
1190 
1191 	asd_init_ddb_0(asd_ha);
1192 
1193 	/* Initialize LmSEQ CIO registers. */
1194 	lseq_mask = asd_ha->hw_prof.enabled_phys;
1195 	for_each_sequencer(lseq_mask, lseq_mask, lseq)
1196 		asd_init_lseq_cio(asd_ha, lseq);
1197 	asd_post_init_cseq(asd_ha);
1198 }
1199 
1200 
1201 /**
1202  * asd_seq_start_cseq -- start the central sequencer, CSEQ
1203  * @asd_ha: pointer to host adapter structure
1204  */
1205 static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha)
1206 {
1207 	/* Reset the ARP2 instruction to location zero. */
1208 	asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
1209 
1210 	/* Unpause the CSEQ  */
1211 	return asd_unpause_cseq(asd_ha);
1212 }
1213 
1214 /**
1215  * asd_seq_start_lseq -- start a link sequencer
1216  * @asd_ha: pointer to host adapter structure
1217  * @lseq: the link sequencer of interest
1218  */
1219 static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
1220 {
1221 	/* Reset the ARP2 instruction to location zero. */
1222 	asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
1223 
1224 	/* Unpause the LmSEQ  */
1225 	return asd_seq_unpause_lseq(asd_ha, lseq);
1226 }
1227 
1228 int asd_release_firmware(void)
1229 {
1230 	if (sequencer_fw)
1231 		release_firmware(sequencer_fw);
1232 	return 0;
1233 }
1234 
1235 static int asd_request_firmware(struct asd_ha_struct *asd_ha)
1236 {
1237 	int err, i;
1238 	struct sequencer_file_header header;
1239 	const struct sequencer_file_header *hdr_ptr;
1240 	u32 csum = 0;
1241 	u16 *ptr_cseq_vecs, *ptr_lseq_vecs;
1242 
1243 	if (sequencer_fw)
1244 		/* already loaded */
1245 		return 0;
1246 
1247 	err = request_firmware(&sequencer_fw,
1248 			       SAS_RAZOR_SEQUENCER_FW_FILE,
1249 			       &asd_ha->pcidev->dev);
1250 	if (err)
1251 		return err;
1252 
1253 	hdr_ptr = (const struct sequencer_file_header *)sequencer_fw->data;
1254 
1255 	header.csum = le32_to_cpu(hdr_ptr->csum);
1256 	header.major = le32_to_cpu(hdr_ptr->major);
1257 	header.minor = le32_to_cpu(hdr_ptr->minor);
1258 	header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset);
1259 	header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size);
1260 	header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset);
1261 	header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size);
1262 	header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset);
1263 	header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size);
1264 	header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset);
1265 	header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size);
1266 	header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task);
1267 	header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop);
1268 	header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop);
1269 
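	/* The checksum covers every byte of the firmware image that follows
	 * the csum field itself. */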
1270 	for (i = sizeof(header.csum); i < sequencer_fw->size; i++)
1271 		csum += sequencer_fw->data[i];
1272 
1273 	if (csum != header.csum) {
1274 		asd_printk("Firmware file checksum mismatch\n");
1275 		return -EINVAL;
1276 	}
1277 
1278 	if (header.cseq_table_size != CSEQ_NUM_VECS ||
1279 	    header.lseq_table_size != LSEQ_NUM_VECS) {
1280 		asd_printk("Firmware file table size mismatch\n");
1281 		return -EINVAL;
1282 	}
1283 
1284 	asd_printk("Found sequencer Firmware version %d.%d (%s)\n",
1285 		   header.major, header.minor, hdr_ptr->version);
1286 
1287 	if (header.major != SAS_RAZOR_SEQUENCER_FW_MAJOR) {
1288 		asd_printk("Firmware Major Version Mismatch; "
1289 			   "driver requires version %d.X\n",
1290 			   SAS_RAZOR_SEQUENCER_FW_MAJOR);
1291 		return -EINVAL;
1292 	}
1293 
1294 	ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset];
1295 	ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset];
1296 	mode2_task = header.mode2_task;
1297 	cseq_idle_loop = header.cseq_idle_loop;
1298 	lseq_idle_loop = header.lseq_idle_loop;
1299 
1300 	for (i = 0; i < CSEQ_NUM_VECS; i++)
1301 		cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]);
1302 
1303 	for (i = 0; i < LSEQ_NUM_VECS; i++)
1304 		lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]);
1305 
1306 	cseq_code = &sequencer_fw->data[header.cseq_code_offset];
1307 	cseq_code_size = header.cseq_code_size;
1308 	lseq_code = &sequencer_fw->data[header.lseq_code_offset];
1309 	lseq_code_size = header.lseq_code_size;
1310 
1311 	return 0;
1312 }
1313 
1314 int asd_init_seqs(struct asd_ha_struct *asd_ha)
1315 {
1316 	int err;
1317 
1318 	err = asd_request_firmware(asd_ha);
1319 
1320 	if (err) {
1321 		asd_printk("Failed to load sequencer firmware file %s, error %d\n",
1322 			   SAS_RAZOR_SEQUENCER_FW_FILE, err);
1323 		return err;
1324 	}
1325 
1326 	err = asd_seq_download_seqs(asd_ha);
1327 	if (err) {
1328 		asd_printk("couldn't download sequencers for %s\n",
1329 			   pci_name(asd_ha->pcidev));
1330 		return err;
1331 	}
1332 
1333 	asd_seq_setup_seqs(asd_ha);
1334 
1335 	return 0;
1336 }
1337 
1338 int asd_start_seqs(struct asd_ha_struct *asd_ha)
1339 {
1340 	int err;
1341 	u8  lseq_mask;
1342 	int lseq;
1343 
1344 	err = asd_seq_start_cseq(asd_ha);
1345 	if (err) {
1346 		asd_printk("couldn't start CSEQ for %s\n",
1347 			   pci_name(asd_ha->pcidev));
1348 		return err;
1349 	}
1350 
1351 	lseq_mask = asd_ha->hw_prof.enabled_phys;
1352 	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
1353 		err = asd_seq_start_lseq(asd_ha, lseq);
1354 		if (err) {
1355 			asd_printk("couldn't start LSEQ %d for %s\n", lseq,
1356 				   pci_name(asd_ha->pcidev));
1357 			return err;
1358 		}
1359 	}
1360 
1361 	return 0;
1362 }
1363 
1364 /**
1365  * asd_update_port_links -- update port_map_by_links and phy_is_up
1366  * @phy: pointer to the phy which has been added to a port
1367  *
1368  * 1) When a link reset has completed and we got BYTES DMAED with a
1369  * valid frame we call this function for that phy, to indicate that
1370  * the phy is up, i.e. we update the phy_is_up in DDB 0.  The
1371  * sequencer checks phy_is_up when pending SCBs are to be sent, and
1372  * when an open address frame has been received.
1373  *
1374  * 2) When we know of ports, we call this function to update the map
1375  * of phys participating in that port, i.e. we update the
1376  * port_map_by_links in DDB 0.  When a HARD_RESET primitive has been
1377  * received, the sequencer disables all phys in that port.
1378  * port_map_by_links is also used as the conn_mask byte in the
1379  * initiator/target port DDB.
1380  */
1381 void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
1382 {
1383 	const u8 phy_mask = (u8) phy->asd_port->phy_mask;
1384 	u8  phy_is_up;
1385 	u8  mask;
1386 	int i, err;
1387 	unsigned long flags;
1388 
1389 	spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
1390 	for_each_phy(phy_mask, mask, i)
1391 		asd_ddbsite_write_byte(asd_ha, 0,
1392 				       offsetof(struct asd_ddb_seq_shared,
1393 						port_map_by_links)+i,phy_mask);
1394 
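	/* Set this phy's bit in phy_is_up, retrying the read-modify-write a
	 * bounded number of times; -EFAULT means a parity error was seen in
	 * DDB 0. */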
1395 	for (i = 0; i < 12; i++) {
1396 		phy_is_up = asd_ddbsite_read_byte(asd_ha, 0,
1397 			  offsetof(struct asd_ddb_seq_shared, phy_is_up));
1398 		err = asd_ddbsite_update_byte(asd_ha, 0,
1399 				offsetof(struct asd_ddb_seq_shared, phy_is_up),
1400 				phy_is_up,
1401 				phy_is_up | phy_mask);
1402 		if (!err)
1403 			break;
1404 		else if (err == -EFAULT) {
1405 			asd_printk("phy_is_up: parity error in DDB 0\n");
1406 			break;
1407 		}
1408 	}
1409 	spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
1410 
1411 	if (err)
1412 		asd_printk("couldn't update DDB 0:error:%d\n", err);
1413 }
1414 
1415 MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE);
1416