/*
 * Intel PCH/PCU SPI flash driver.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/platform_data/intel-spi.h>

#include "intel-spi.h"

/* Offsets are from @ispi->base */
#define BFPREG				0x00

#define HSFSTS_CTL			0x04
#define HSFSTS_CTL_FSMIE		BIT(31)
#define HSFSTS_CTL_FDBC_SHIFT		24
#define HSFSTS_CTL_FDBC_MASK		(0x3f << HSFSTS_CTL_FDBC_SHIFT)

#define HSFSTS_CTL_FCYCLE_SHIFT		17
#define HSFSTS_CTL_FCYCLE_MASK		(0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
/* HW sequencer opcodes */
#define HSFSTS_CTL_FCYCLE_READ		(0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRITE		(0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE		(0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE_64K	(0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDID		(0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRSR		(0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSR		(0x08 << HSFSTS_CTL_FCYCLE_SHIFT)

#define HSFSTS_CTL_FGO			BIT(16)
#define HSFSTS_CTL_FLOCKDN		BIT(15)
#define HSFSTS_CTL_FDV			BIT(14)
#define HSFSTS_CTL_SCIP			BIT(5)
#define HSFSTS_CTL_AEL			BIT(2)
#define HSFSTS_CTL_FCERR		BIT(1)
#define HSFSTS_CTL_FDONE		BIT(0)

#define FADDR				0x08
#define DLOCK				0x0c
#define FDATA(n)			(0x10 + ((n) * 4))

#define FRACC				0x50

#define FREG(n)				(0x54 + ((n) * 4))
#define FREG_BASE_MASK			0x3fff
#define FREG_LIMIT_SHIFT		16
#define FREG_LIMIT_MASK			(0x3fff << FREG_LIMIT_SHIFT)

/* Offset is from @ispi->pregs */
#define PR(n)				((n) * 4)
#define PR_WPE				BIT(31)
#define PR_LIMIT_SHIFT			16
#define PR_LIMIT_MASK			(0x3fff << PR_LIMIT_SHIFT)
#define PR_RPE				BIT(15)
#define PR_BASE_MASK			0x3fff
/* Last PR is GPR0 */
#define PR_NUM				(5 + 1)
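
/*
 * Worked example (illustrative value only): a PR register reading
 * 0x87ff0500 decodes as PR_WPE set, limit 0x7ff and base 0x500. Base and
 * limit are in 4 KB units, so the write-protected range is
 * 0x500000..0x7fffff; this matches the decoding done in
 * intel_spi_dump_regs() and intel_spi_is_protected() below.
 */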

/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL			0x00
#define SSFSTS_CTL_FSMIE		BIT(23)
#define SSFSTS_CTL_DS			BIT(22)
#define SSFSTS_CTL_DBC_SHIFT		16
#define SSFSTS_CTL_SPOP			BIT(11)
#define SSFSTS_CTL_ACS			BIT(10)
#define SSFSTS_CTL_SCGO			BIT(9)
#define SSFSTS_CTL_COP_SHIFT		12
#define SSFSTS_CTL_FRS			BIT(7)
#define SSFSTS_CTL_DOFRS		BIT(6)
#define SSFSTS_CTL_AEL			BIT(4)
#define SSFSTS_CTL_FCERR		BIT(3)
#define SSFSTS_CTL_FDONE		BIT(2)
#define SSFSTS_CTL_SCIP			BIT(0)

#define PREOP_OPTYPE			0x04
#define OPMENU0				0x08
#define OPMENU1				0x0c

/* CPU specifics */
#define BYT_PR				0x74
#define BYT_SSFSTS_CTL			0x90
#define BYT_BCR				0xfc
#define BYT_BCR_WPD			BIT(0)
#define BYT_FREG_NUM			5

#define LPT_PR				0x74
#define LPT_SSFSTS_CTL			0x90
#define LPT_FREG_NUM			5

#define BXT_PR				0x84
#define BXT_SSFSTS_CTL			0xa0
#define BXT_FREG_NUM			12

#define INTEL_SPI_TIMEOUT		5000 /* ms */
#define INTEL_SPI_FIFO_SZ		64
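
/*
 * Note: the FDBC field of HSFSTS_CTL holds the transfer length minus one,
 * so its 6-bit mask covers transfers of 1..64 bytes, which is where
 * INTEL_SPI_FIFO_SZ comes from.
 */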

/**
 * struct intel_spi - Driver private data
 * @dev: Device pointer
 * @info: Pointer to board specific info
 * @nor: SPI NOR layer structure
 * @base: Beginning of MMIO space
 * @pregs: Start of protection registers
 * @sregs: Start of software sequencer registers
 * @nregions: Maximum number of regions
 * @writeable: Is the chip writeable
 * @swseq: Use SW sequencer in register reads/writes
 * @erase_64k: 64k erase supported
 * @opcodes: Opcodes which are supported. These are programmed by the BIOS
 *           before it locks down the controller.
 * @preopcodes: Preopcodes which are supported.
 */
struct intel_spi {
	struct device *dev;
	const struct intel_spi_boardinfo *info;
	struct spi_nor nor;
	void __iomem *base;
	void __iomem *pregs;
	void __iomem *sregs;
	size_t nregions;
	bool writeable;
	bool swseq;
	bool erase_64k;
	u8 opcodes[8];
	u8 preopcodes[2];
};

static bool writeable;
module_param(writeable, bool, 0);
MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
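
/*
 * Illustrative usage only (the exact module name depends on how this core
 * is built and which glue driver binds the controller): write access could
 * be requested at load time with something like
 *
 *	modprobe intel-spi writeable=1
 *
 * Even then the hardware must also allow writes; see the BCR handling in
 * intel_spi_init() and the protected-region checks in
 * intel_spi_fill_partition() below.
 */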

static void intel_spi_dump_regs(struct intel_spi *ispi)
{
	u32 value;
	int i;

	dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));

	value = readl(ispi->base + HSFSTS_CTL);
	dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
	if (value & HSFSTS_CTL_FLOCKDN)
		dev_dbg(ispi->dev, "-> Locked\n");

	dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
	dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));

	for (i = 0; i < 16; i++)
		dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
			i, readl(ispi->base + FDATA(i)));

	dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));

	for (i = 0; i < ispi->nregions; i++)
		dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
			readl(ispi->base + FREG(i)));
	for (i = 0; i < PR_NUM; i++)
		dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
			readl(ispi->pregs + PR(i)));

	value = readl(ispi->sregs + SSFSTS_CTL);
	dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
	dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
		readl(ispi->sregs + PREOP_OPTYPE));
	dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
	dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));

	if (ispi->info->type == INTEL_SPI_BYT)
		dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));

	dev_dbg(ispi->dev, "Protected regions:\n");
	for (i = 0; i < PR_NUM; i++) {
		u32 base, limit;

		value = readl(ispi->pregs + PR(i));
		if (!(value & (PR_WPE | PR_RPE)))
			continue;

		limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		base = value & PR_BASE_MASK;

		dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
			 i, base << 12, (limit << 12) | 0xfff,
			 value & PR_WPE ? 'W' : '.',
			 value & PR_RPE ? 'R' : '.');
	}

	dev_dbg(ispi->dev, "Flash regions:\n");
	for (i = 0; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		if (base >= limit || (i > 0 && limit == 0))
			dev_dbg(ispi->dev, " %02d disabled\n", i);
		else
			dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
				 i, base << 12, (limit << 12) | 0xfff);
	}

	dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
		ispi->swseq ? 'S' : 'H');
}

/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
{
	size_t bytes;
	int i = 0;

	if (size > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	while (size > 0) {
		bytes = min_t(size_t, size, 4);
		memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
		size -= bytes;
		buf += bytes;
		i++;
	}

	return 0;
}

/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
				 size_t size)
{
	size_t bytes;
	int i = 0;

	if (size > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	while (size > 0) {
		bytes = min_t(size_t, size, 4);
		memcpy_toio(ispi->base + FDATA(i), buf, bytes);
		size -= bytes;
		buf += bytes;
		i++;
	}

	return 0;
}

static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
				  !(val & HSFSTS_CTL_SCIP), 0,
				  INTEL_SPI_TIMEOUT * 1000);
}

static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
				  !(val & SSFSTS_CTL_SCIP), 0,
				  INTEL_SPI_TIMEOUT * 1000);
}
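
/*
 * Note: readl_poll_timeout() takes its timeout in microseconds, hence the
 * INTEL_SPI_TIMEOUT * 1000 in the two helpers above (INTEL_SPI_TIMEOUT is
 * in milliseconds).
 */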

static int intel_spi_init(struct intel_spi *ispi)
{
	u32 opmenu0, opmenu1, val;
	int i;

	switch (ispi->info->type) {
	case INTEL_SPI_BYT:
		ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BYT_PR;
		ispi->nregions = BYT_FREG_NUM;

		if (writeable) {
			/* Disable write protection */
			val = readl(ispi->base + BYT_BCR);
			if (!(val & BYT_BCR_WPD)) {
				val |= BYT_BCR_WPD;
				writel(val, ispi->base + BYT_BCR);
				val = readl(ispi->base + BYT_BCR);
			}

			ispi->writeable = !!(val & BYT_BCR_WPD);
		}

		break;

	case INTEL_SPI_LPT:
		ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
		ispi->pregs = ispi->base + LPT_PR;
		ispi->nregions = LPT_FREG_NUM;
		break;

	case INTEL_SPI_BXT:
		ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BXT_PR;
		ispi->nregions = BXT_FREG_NUM;
		ispi->erase_64k = true;
		break;

	default:
		return -EINVAL;
	}

	/* Disable #SMI generation */
	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~HSFSTS_CTL_FSMIE;
	writel(val, ispi->base + HSFSTS_CTL);

	/*
	 * BIOS programs allowed opcodes and then locks down the register.
	 * So read back what opcodes it decided to support. That's the set
	 * we are going to support as well.
	 */
	opmenu0 = readl(ispi->sregs + OPMENU0);
	opmenu1 = readl(ispi->sregs + OPMENU1);

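	/*
	 * OPMENU0 and OPMENU1 together hold up to eight allowed opcodes, one
	 * byte each: opcodes[0..3] come from OPMENU0 bits 7:0, 15:8, 23:16
	 * and 31:24, and opcodes[4..7] from OPMENU1 in the same order. The
	 * loop below unpacks them into ispi->opcodes[].
	 */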
	/*
	 * Some controllers can only do basic operations using the hardware
	 * sequencer; everything else is supposed to be carried out using the
	 * software sequencer. If we find that the BIOS has programmed opcodes
	 * for the software sequencer, we use it instead of the hardware
	 * sequencer.
	 */
	if (opmenu0 && opmenu1) {
		for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
			ispi->opcodes[i] = opmenu0 >> i * 8;
			ispi->opcodes[i + 4] = opmenu1 >> i * 8;
		}

		val = readl(ispi->sregs + PREOP_OPTYPE);
		ispi->preopcodes[0] = val;
		ispi->preopcodes[1] = val >> 8;

		/* Disable #SMI generation from SW sequencer */
		val = readl(ispi->sregs + SSFSTS_CTL);
		val &= ~SSFSTS_CTL_FSMIE;
		writel(val, ispi->sregs + SSFSTS_CTL);

		ispi->swseq = true;
	}

	intel_spi_dump_regs(ispi);

	return 0;
}

static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
		if (ispi->opcodes[i] == opcode)
			return i;
	return -EINVAL;
}

static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
			      int len)
{
	u32 val, status;
	int ret;

	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);

	switch (opcode) {
	case SPINOR_OP_RDID:
		val |= HSFSTS_CTL_FCYCLE_RDID;
		break;
	case SPINOR_OP_WRSR:
		val |= HSFSTS_CTL_FCYCLE_WRSR;
		break;
	case SPINOR_OP_RDSR:
		val |= HSFSTS_CTL_FCYCLE_RDSR;
		break;
	default:
		return -EINVAL;
	}

	val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
	val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
	val |= HSFSTS_CTL_FGO;
	writel(val, ispi->base + HSFSTS_CTL);

	ret = intel_spi_wait_hw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->base + HSFSTS_CTL);
	if (status & HSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & HSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}

static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
			      int len)
{
	u32 val, status;
	int ret;

	ret = intel_spi_opcode_index(ispi, opcode);
	if (ret < 0)
		return ret;

	val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
	val |= ret << SSFSTS_CTL_COP_SHIFT;
	val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
	val |= SSFSTS_CTL_SCGO;
	writel(val, ispi->sregs + SSFSTS_CTL);

	ret = intel_spi_wait_sw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->sregs + SSFSTS_CTL);
	if (status & SSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & SSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}

static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	struct intel_spi *ispi = nor->priv;
	int ret;

	/* Address of the first chip */
	writel(0, ispi->base + FADDR);

	if (ispi->swseq)
		ret = intel_spi_sw_cycle(ispi, opcode, buf, len);
	else
		ret = intel_spi_hw_cycle(ispi, opcode, buf, len);

	if (ret)
		return ret;

	return intel_spi_read_block(ispi, buf, len);
}

static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	struct intel_spi *ispi = nor->priv;
	int ret;

	/*
	 * Write enable is handled by the controller itself via the atomic
	 * cycle and preopcode mechanism, so skip the explicit WREN here.
	 */
	if (opcode == SPINOR_OP_WREN)
		return 0;

	writel(0, ispi->base + FADDR);

	/* Write the value beforehand */
	ret = intel_spi_write_block(ispi, buf, len);
	if (ret)
		return ret;

	if (ispi->swseq)
		return intel_spi_sw_cycle(ispi, opcode, buf, len);
	return intel_spi_hw_cycle(ispi, opcode, buf, len);
}

static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
			      u_char *read_buf)
{
	struct intel_spi *ispi = nor->priv;
	size_t block_size, retlen = 0;
	u32 val, status;
	ssize_t ret;

	switch (nor->read_opcode) {
	case SPINOR_OP_READ:
	case SPINOR_OP_READ_FAST:
		break;
	default:
		return -EINVAL;
	}

	while (len > 0) {
		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);

		/* Read cannot cross 4K boundary */
		block_size = min_t(loff_t, from + block_size,
				   round_up(from + 1, SZ_4K)) - from;
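		/*
		 * For example (illustrative numbers only): with from = 0x0ffe
		 * and 64 bytes left, round_up(from + 1, SZ_4K) is 0x1000, so
		 * block_size is clamped to 2 bytes and the next iteration
		 * starts at the 4K boundary.
		 */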

		writel(from, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_READ;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "read error: %llx: %#x\n", from,
				status);
			return ret;
		}

		ret = intel_spi_read_block(ispi, read_buf, block_size);
		if (ret)
			return ret;

		len -= block_size;
		from += block_size;
		retlen += block_size;
		read_buf += block_size;
	}

	return retlen;
}

static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
			       const u_char *write_buf)
{
	struct intel_spi *ispi = nor->priv;
	size_t block_size, retlen = 0;
	u32 val, status;
	ssize_t ret;

	while (len > 0) {
		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);

		/* Write cannot cross 4K boundary */
		block_size = min_t(loff_t, to + block_size,
				   round_up(to + 1, SZ_4K)) - to;

		writel(to, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_WRITE;

		/* Write enable */
		if (ispi->preopcodes[1] == SPINOR_OP_WREN)
			val |= SSFSTS_CTL_SPOP;
		val |= SSFSTS_CTL_ACS;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_write_block(ispi, write_buf, block_size);
		if (ret) {
			dev_err(ispi->dev, "failed to write block\n");
			return ret;
		}

		/* Start the write now */
		val = readl(ispi->base + HSFSTS_CTL);
		writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret) {
			dev_err(ispi->dev, "timeout\n");
			return ret;
		}

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "write error: %llx: %#x\n", to,
				status);
			return ret;
		}

		len -= block_size;
		to += block_size;
		retlen += block_size;
		write_buf += block_size;
	}

	return retlen;
}

static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
{
	size_t erase_size, len = nor->mtd.erasesize;
	struct intel_spi *ispi = nor->priv;
	u32 val, status, cmd;
	int ret;

	/* If the hardware can do 64k erase use that when possible */
	if (len >= SZ_64K && ispi->erase_64k) {
		cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
		erase_size = SZ_64K;
	} else {
		cmd = HSFSTS_CTL_FCYCLE_ERASE;
		erase_size = SZ_4K;
	}

	while (len > 0) {
		writel(offs, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= cmd;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			return -EIO;
		else if (status & HSFSTS_CTL_AEL)
			return -EACCES;

		offs += erase_size;
		len -= erase_size;
	}

	return 0;
}

static bool intel_spi_is_protected(const struct intel_spi *ispi,
				   unsigned int base, unsigned int limit)
{
	int i;

	for (i = 0; i < PR_NUM; i++) {
		u32 pr_base, pr_limit, pr_value;

		pr_value = readl(ispi->pregs + PR(i));
		if (!(pr_value & (PR_WPE | PR_RPE)))
			continue;

		pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		pr_base = pr_value & PR_BASE_MASK;

		if (pr_base >= base && pr_limit <= limit)
			return true;
	}

	return false;
}

/*
 * There will be a single partition holding all enabled flash regions. We
 * call this "BIOS".
 */
static void intel_spi_fill_partition(struct intel_spi *ispi,
				     struct mtd_partition *part)
{
	u64 end;
	int i;

	memset(part, 0, sizeof(*part));

	/* Start from the mandatory descriptor region */
	part->size = 4096;
	part->name = "BIOS";

	/*
	 * Now try to find where this partition ends based on the flash
	 * region registers.
	 */
	for (i = 1; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		if (base >= limit || limit == 0)
			continue;

		/*
		 * If any of the regions have protection bits set, make the
		 * whole partition read-only to be on the safe side.
		 */
		if (intel_spi_is_protected(ispi, base, limit))
			ispi->writeable = false;

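		/*
		 * The limit field is in 4 KB units and addresses the last
		 * 4 KB block of the region (the region spans base << 12 to
		 * (limit << 12) | 0xfff), so the partition must extend to
		 * (limit + 1) * 4096 bytes.
		 */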
		end = (limit << 12) + 4096;
		if (end > part->size)
			part->size = end;
	}
}

struct intel_spi *intel_spi_probe(struct device *dev,
	struct resource *mem, const struct intel_spi_boardinfo *info)
{
	const struct spi_nor_hwcaps hwcaps = {
		.mask = SNOR_HWCAPS_READ |
			SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_PP,
	};
	struct mtd_partition part;
	struct intel_spi *ispi;
	int ret;

	if (!info || !mem)
		return ERR_PTR(-EINVAL);

	ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
	if (!ispi)
		return ERR_PTR(-ENOMEM);

	ispi->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ispi->base))
		return ERR_CAST(ispi->base);

	ispi->dev = dev;
	ispi->info = info;
	ispi->writeable = info->writeable;

	ret = intel_spi_init(ispi);
	if (ret)
		return ERR_PTR(ret);

	ispi->nor.dev = ispi->dev;
	ispi->nor.priv = ispi;
	ispi->nor.read_reg = intel_spi_read_reg;
	ispi->nor.write_reg = intel_spi_write_reg;
	ispi->nor.read = intel_spi_read;
	ispi->nor.write = intel_spi_write;
	ispi->nor.erase = intel_spi_erase;

	ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
	if (ret) {
		dev_info(dev, "failed to locate the chip\n");
		return ERR_PTR(ret);
	}

	intel_spi_fill_partition(ispi, &part);

	/* Prevent writes if not explicitly enabled */
	if (!ispi->writeable || !writeable)
		ispi->nor.mtd.flags &= ~MTD_WRITEABLE;

	ret = mtd_device_parse_register(&ispi->nor.mtd, NULL, NULL, &part, 1);
	if (ret)
		return ERR_PTR(ret);

	return ispi;
}
EXPORT_SYMBOL_GPL(intel_spi_probe);

int intel_spi_remove(struct intel_spi *ispi)
{
	return mtd_device_unregister(&ispi->nor.mtd);
}
EXPORT_SYMBOL_GPL(intel_spi_remove);
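
/*
 * Usage sketch (hypothetical glue driver, for illustration only): a
 * platform or PCI front end is expected to map the controller registers,
 * provide a struct intel_spi_boardinfo and hand both to intel_spi_probe(),
 * calling intel_spi_remove() on teardown. Roughly:
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct intel_spi_boardinfo *info;
 *		struct intel_spi *ispi;
 *		struct resource *mem;
 *
 *		info = dev_get_platdata(&pdev->dev);
 *		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		ispi = intel_spi_probe(&pdev->dev, mem, info);
 *		if (IS_ERR(ispi))
 *			return PTR_ERR(ispi);
 *
 *		platform_set_drvdata(pdev, ispi);
 *		return 0;
 *	}
 *
 * See the intel-spi-platform and intel-spi-pci front ends in the kernel
 * tree for the real implementations.
 */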

MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");