/*
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors: Shlomi Gridish <gridish@freescale.com>
 *          Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Slow API Set - UCC Slow specific routines implementations.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/export.h>

#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>

#include <asm/ucc.h>
#include <asm/ucc_slow.h>

u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
{
	switch (uccs_num) {
	case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
	case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
	case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
	case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
	case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
	case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
	case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
	case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
	default: return QE_CR_SUBBLOCK_INVALID;
	}
}
EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);

void ucc_slow_poll_transmitter_now(struct ucc_slow_private *uccs)
{
	out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD);
}

void ucc_slow_graceful_stop_tx(struct ucc_slow_private *uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);

void ucc_slow_stop_tx(struct ucc_slow_private *uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
EXPORT_SYMBOL(ucc_slow_stop_tx);

void ucc_slow_restart_tx(struct ucc_slow_private *uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
EXPORT_SYMBOL(ucc_slow_restart_tx);

void ucc_slow_enable(struct ucc_slow_private *uccs, enum comm_dir mode)
{
	struct ucc_slow *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;

	/* Enable reception and/or transmission on this UCC. */
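	/*
	 * GUMR_L is read-modify-written below so that enabling one
	 * direction leaves the other untouched: ENT gates the
	 * transmitter, ENR the receiver.
	 */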
	gumr_l = in_be32(&us_regs->gumr_l);
	if (mode & COMM_DIR_TX) {
		gumr_l |= UCC_SLOW_GUMR_L_ENT;
		uccs->enabled_tx = 1;
	}
	if (mode & COMM_DIR_RX) {
		gumr_l |= UCC_SLOW_GUMR_L_ENR;
		uccs->enabled_rx = 1;
	}
	out_be32(&us_regs->gumr_l, gumr_l);
}
EXPORT_SYMBOL(ucc_slow_enable);

void ucc_slow_disable(struct ucc_slow_private *uccs, enum comm_dir mode)
{
	struct ucc_slow *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;

	/* Disable reception and/or transmission on this UCC. */
	gumr_l = in_be32(&us_regs->gumr_l);
	if (mode & COMM_DIR_TX) {
		gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
		uccs->enabled_tx = 0;
	}
	if (mode & COMM_DIR_RX) {
		gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
		uccs->enabled_rx = 0;
	}
	out_be32(&us_regs->gumr_l, gumr_l);
}
EXPORT_SYMBOL(ucc_slow_disable);

/* Initialize the UCC for Slow operations
 *
 * The caller should initialize the fields of us_info before calling.
 */
int ucc_slow_init(struct ucc_slow_info *us_info, struct ucc_slow_private **uccs_ret)
{
	struct ucc_slow_private *uccs;
	u32 i;
	struct ucc_slow __iomem *us_regs;
	u32 gumr;
	struct qe_bd *bd;
	u32 id;
	u32 command;
	int ret = 0;

	if (!us_info)
		return -EINVAL;

	/* Check that the UCC port number is in range. */
	if ((us_info->ucc_num < 0) || (us_info->ucc_num >= UCC_MAX_NUM)) {
		printk(KERN_ERR "%s: illegal UCC number\n", __func__);
		return -EINVAL;
	}

	/*
	 * Set mrblr.
	 * Check that 'max_rx_buf_length' is properly aligned (4), unless
	 * rfw is 1, meaning that the QE accepts one byte at a time, unlike
	 * the normal case when the QE accepts 32 bits at a time.
	 */
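	/*
	 * For example, with an UCC_SLOW_MRBLR_ALIGNMENT of 4, a
	 * max_rx_buf_length of 1518 fails the mask test below
	 * (1518 & 3 == 2), while 1520, a multiple of 4, passes.
	 */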
154 */ 155 if ((!us_info->rfw) && 156 (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) { 157 printk(KERN_ERR "max_rx_buf_length not aligned.\n"); 158 return -EINVAL; 159 } 160 161 uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL); 162 if (!uccs) { 163 printk(KERN_ERR "%s: Cannot allocate private data\n", 164 __func__); 165 return -ENOMEM; 166 } 167 168 /* Fill slow UCC structure */ 169 uccs->us_info = us_info; 170 /* Set the PHY base address */ 171 uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow)); 172 if (uccs->us_regs == NULL) { 173 printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__); 174 kfree(uccs); 175 return -ENOMEM; 176 } 177 178 uccs->saved_uccm = 0; 179 uccs->p_rx_frame = 0; 180 us_regs = uccs->us_regs; 181 uccs->p_ucce = (u16 *) & (us_regs->ucce); 182 uccs->p_uccm = (u16 *) & (us_regs->uccm); 183 #ifdef STATISTICS 184 uccs->rx_frames = 0; 185 uccs->tx_frames = 0; 186 uccs->rx_discarded = 0; 187 #endif /* STATISTICS */ 188 189 /* Get PRAM base */ 190 uccs->us_pram_offset = 191 qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM); 192 if (IS_ERR_VALUE(uccs->us_pram_offset)) { 193 printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__); 194 ucc_slow_free(uccs); 195 return -ENOMEM; 196 } 197 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); 198 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol, 199 uccs->us_pram_offset); 200 201 uccs->us_pram = qe_muram_addr(uccs->us_pram_offset); 202 203 /* Set UCC to slow type */ 204 ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW); 205 if (ret) { 206 printk(KERN_ERR "%s: cannot set UCC type", __func__); 207 ucc_slow_free(uccs); 208 return ret; 209 } 210 211 out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length); 212 213 INIT_LIST_HEAD(&uccs->confQ); 214 215 /* Allocate BDs. */ 216 uccs->rx_base_offset = 217 qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd), 218 QE_ALIGNMENT_OF_BD); 219 if (IS_ERR_VALUE(uccs->rx_base_offset)) { 220 printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__, 221 us_info->rx_bd_ring_len); 222 uccs->rx_base_offset = 0; 223 ucc_slow_free(uccs); 224 return -ENOMEM; 225 } 226 227 uccs->tx_base_offset = 228 qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd), 229 QE_ALIGNMENT_OF_BD); 230 if (IS_ERR_VALUE(uccs->tx_base_offset)) { 231 printk(KERN_ERR "%s: cannot allocate TX BDs", __func__); 232 uccs->tx_base_offset = 0; 233 ucc_slow_free(uccs); 234 return -ENOMEM; 235 } 236 237 /* Init Tx bds */ 238 bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset); 239 for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) { 240 /* clear bd buffer */ 241 out_be32(&bd->buf, 0); 242 /* set bd status and length */ 243 out_be32((u32 *) bd, 0); 244 bd++; 245 } 246 /* for last BD set Wrap bit */ 247 out_be32(&bd->buf, 0); 248 out_be32((u32 *) bd, cpu_to_be32(T_W)); 249 250 /* Init Rx bds */ 251 bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); 252 for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) { 253 /* set bd status and length */ 254 out_be32((u32*)bd, 0); 255 /* clear bd buffer */ 256 out_be32(&bd->buf, 0); 257 bd++; 258 } 259 /* for last BD set Wrap bit */ 260 out_be32((u32*)bd, cpu_to_be32(R_W)); 261 out_be32(&bd->buf, 0); 262 263 /* Set GUMR (For more details see the hardware spec.). 

	/* Set GUMR (For more details see the hardware spec.). */
	/* gumr_h */
	gumr = us_info->tcrc;
	if (us_info->cdp)
		gumr |= UCC_SLOW_GUMR_H_CDP;
	if (us_info->ctsp)
		gumr |= UCC_SLOW_GUMR_H_CTSP;
	if (us_info->cds)
		gumr |= UCC_SLOW_GUMR_H_CDS;
	if (us_info->ctss)
		gumr |= UCC_SLOW_GUMR_H_CTSS;
	if (us_info->tfl)
		gumr |= UCC_SLOW_GUMR_H_TFL;
	if (us_info->rfw)
		gumr |= UCC_SLOW_GUMR_H_RFW;
	if (us_info->txsy)
		gumr |= UCC_SLOW_GUMR_H_TXSY;
	if (us_info->rtsm)
		gumr |= UCC_SLOW_GUMR_H_RTSM;
	out_be32(&us_regs->gumr_h, gumr);

	/* gumr_l */
	gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
		us_info->diag | us_info->mode;
	if (us_info->tci)
		gumr |= UCC_SLOW_GUMR_L_TCI;
	if (us_info->rinv)
		gumr |= UCC_SLOW_GUMR_L_RINV;
	if (us_info->tinv)
		gumr |= UCC_SLOW_GUMR_L_TINV;
	if (us_info->tend)
		gumr |= UCC_SLOW_GUMR_L_TEND;
	out_be32(&us_regs->gumr_l, gumr);

	/* Function code registers */

	/* if the data is in cacheable memory, the 'global' bit in the
	 * function code should be set */
	uccs->us_pram->tbmr = UCC_BMR_BO_BE;
	uccs->us_pram->rbmr = UCC_BMR_BO_BE;

	/* rbase, tbase are offsets from MURAM base */
	out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
	out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);

	/* Mux clocking */
	/* Grant Support */
	ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
	/* Breakpoint Support */
	ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
	/* Set TSA or NMSI mode. */
	ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
	/* If NMSI (not TSA), set Tx and Rx clock. */
	if (!us_info->tsa) {
		/* Rx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
					COMM_DIR_RX)) {
			printk(KERN_ERR "%s: illegal value for RX clock\n",
			       __func__);
			ucc_slow_free(uccs);
			return -EINVAL;
		}
		/* Tx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
					COMM_DIR_TX)) {
			printk(KERN_ERR "%s: illegal value for TX clock\n",
			       __func__);
			ucc_slow_free(uccs);
			return -EINVAL;
		}
	}

	/* Set the interrupt mask register at the UCC level. */
	out_be16(&us_regs->uccm, us_info->uccm_mask);

	/* First, clear anything pending at UCC level,
	 * otherwise, old garbage may come through
	 * as soon as the dam is opened. */

	/* Writing '1' clears */
	out_be16(&us_regs->ucce, 0xffff);

	/* Issue QE Init command */
	if (us_info->init_tx && us_info->init_rx)
		command = QE_INIT_TX_RX;
	else if (us_info->init_tx)
		command = QE_INIT_TX;
	else
		command = QE_INIT_RX;	/* We know at least one is TRUE */

	qe_issue_cmd(command, id, us_info->protocol, 0);

	*uccs_ret = uccs;
	return 0;
}
EXPORT_SYMBOL(ucc_slow_init);

void ucc_slow_free(struct ucc_slow_private *uccs)
{
	if (!uccs)
		return;

	if (uccs->rx_base_offset)
		qe_muram_free(uccs->rx_base_offset);

	if (uccs->tx_base_offset)
		qe_muram_free(uccs->tx_base_offset);

	if (uccs->us_pram)
		qe_muram_free(uccs->us_pram_offset);

	if (uccs->us_regs)
		iounmap(uccs->us_regs);

	kfree(uccs);
}
EXPORT_SYMBOL(ucc_slow_free);
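
/*
 * Typical usage (a minimal sketch, not part of this file): a caller such
 * as a UART or HDLC driver fills a struct ucc_slow_info, usually from the
 * device tree, and then drives the API roughly as follows. The field
 * values below are illustrative assumptions only; regs, protocol, clock
 * routing, etc. must also be set for real hardware.
 *
 *	struct ucc_slow_info us_info = {
 *		.ucc_num	   = 0,
 *		.init_tx	   = 1,
 *		.init_rx	   = 1,
 *		.tx_bd_ring_len	   = 16,
 *		.rx_bd_ring_len	   = 16,
 *		.max_rx_buf_length = 1600,	// multiple of 4, see above
 *	};
 *	struct ucc_slow_private *uccs;
 *
 *	if (!ucc_slow_init(&us_info, &uccs)) {
 *		ucc_slow_enable(uccs, COMM_DIR_RX_AND_TX);
 *		// ... exchange data through the BD rings ...
 *		ucc_slow_disable(uccs, COMM_DIR_RX_AND_TX);
 *		ucc_slow_free(uccs);
 *	}
 */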