1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * LPDDR flash memory device operations. This module provides read, write,
4  * erase, lock/unlock support for LPDDR flash memories
5  * (C) 2008 Korolev Alexey <akorolev@infradead.org>
6  * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
7  * Many thanks to Roman Borisov for initial enabling
8  *
9  * TODO:
10  * Implement VPP management
11  * Implement XIP support
12  * Implement OTP support
13  */
14 #include <linux/mtd/pfow.h>
15 #include <linux/mtd/qinfo.h>
16 #include <linux/slab.h>
17 #include <linux/module.h>
18 
19 static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
20 					size_t *retlen, u_char *buf);
21 static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
22 				size_t len, size_t *retlen, const u_char *buf);
23 static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
24 				unsigned long count, loff_t to, size_t *retlen);
25 static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
26 static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
27 static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
28 static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
29 			size_t *retlen, void **mtdbuf, resource_size_t *phys);
30 static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
31 static int get_chip(struct map_info *map, struct flchip *chip, int mode);
32 static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
33 static void put_chip(struct map_info *map, struct flchip *chip);
34 
lpddr_cmdset(struct map_info * map)35 struct mtd_info *lpddr_cmdset(struct map_info *map)
36 {
37 	struct lpddr_private *lpddr = map->fldrv_priv;
38 	struct flchip_shared *shared;
39 	struct flchip *chip;
40 	struct mtd_info *mtd;
41 	int numchips;
42 	int i, j;
43 
44 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
45 	if (!mtd)
46 		return NULL;
47 	mtd->priv = map;
48 	mtd->type = MTD_NORFLASH;
49 
50 	/* Fill in the default mtd operations */
51 	mtd->_read = lpddr_read;
52 	mtd->type = MTD_NORFLASH;
53 	mtd->flags = MTD_CAP_NORFLASH;
54 	mtd->flags &= ~MTD_BIT_WRITEABLE;
55 	mtd->_erase = lpddr_erase;
56 	mtd->_write = lpddr_write_buffers;
57 	mtd->_writev = lpddr_writev;
58 	mtd->_lock = lpddr_lock;
59 	mtd->_unlock = lpddr_unlock;
60 	if (map_is_linear(map)) {
61 		mtd->_point = lpddr_point;
62 		mtd->_unpoint = lpddr_unpoint;
63 	}
64 	mtd->size = 1 << lpddr->qinfo->DevSizeShift;
65 	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
66 	mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
67 
68 	shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
69 						GFP_KERNEL);
70 	if (!shared) {
71 		kfree(mtd);
72 		return NULL;
73 	}
74 
75 	chip = &lpddr->chips[0];
76 	numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
77 	for (i = 0; i < numchips; i++) {
78 		shared[i].writing = shared[i].erasing = NULL;
79 		mutex_init(&shared[i].lock);
80 		for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
81 			*chip = lpddr->chips[i];
82 			chip->start += j << lpddr->chipshift;
83 			chip->oldstate = chip->state = FL_READY;
84 			chip->priv = &shared[i];
85 			/* those should be reset too since
86 			   they create memory references. */
87 			init_waitqueue_head(&chip->wq);
88 			mutex_init(&chip->mutex);
89 			chip++;
90 		}
91 	}
92 
93 	return mtd;
94 }
95 EXPORT_SYMBOL(lpddr_cmdset);
96 
/*
 * Poll the PFOW Device Status Register until the chip reports ready, the
 * operation is found to have failed, or the timeout expires.
 *
 * @chip_op_time: expected operation duration (same unit as the delays
 *                below — appears to be microseconds, given the
 *                sleep_time/1000 -> msleep conversion; confirm against
 *                qinfo timing fields).
 *
 * Called with chip->mutex held; the lock is dropped while delaying and
 * while waiting out a suspension, then re-taken.  On return the chip
 * state is forced back to FL_READY.  Returns 0, -ETIME or -EIO.
 */
static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d \n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			/* Remaining wait is shorter than a tick: busy-wait */
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended)  {
			/* Suspend has occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR*/
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}
169 
/*
 * Acquire the right to run an operation of type @mode on @chip.
 *
 * Called with chip->mutex held; the lock may be dropped and re-taken
 * while arbitrating with other partitions or sleeping.  Returns 0 once
 * the caller may proceed, or a negative error from chip_ready().
 * Pair with put_chip() when the operation completes.
 */
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			/* Drop our own lock before waiting on the contender
			 * to avoid lock-order inversion. */
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already in FL_SYNCING
			 * state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we have suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
265 
/*
 * Decide whether @chip can begin an operation of type @mode right now,
 * suspending an in-progress erase if the hardware supports it.
 *
 * Called with chip->mutex held.  Returns 0 when the caller may proceed,
 * -EAGAIN after having slept (caller must re-check and retry), or -EIO
 * if an erase-suspend attempt failed.
 */
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only reads/points may interrupt an erase, and only when
		 * the chip advertises erase-suspend support. */
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. something got wrong. */
			/* Resume and pretend we weren't here.  */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed."
					"State may be wrong \n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;
		/* Erase suspend */
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fall through */

	default:
sleep:
		/* Busy: sleep until woken by put_chip(), then ask the
		 * caller to retry. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
320 
/*
 * Release ownership acquired by get_chip() and resume any erase that was
 * suspended on the caller's behalf, then wake waiters on chip->wq.
 *
 * Called with chip->mutex held.  May recurse once to hand the shared
 * write/erase engine back to the partition that lent it to us.
 */
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				/* Resume the loaner's suspended operation
				 * in its own context. */
				put_chip(map, loaner);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch (chip->oldstate) {
	case FL_ERASING:
		/* Resume the erase we suspended in chip_ready() */
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
375 
/*
 * Program up to one write-buffer's worth of data at @adr, gathering bytes
 * from the kvec array.  *pvec/*pvec_seek are advanced past the consumed
 * data so the caller can continue with the next chunk.
 *
 * @len must not make the write cross a program-buffer boundary (the
 * caller, lpddr_writev(), guarantees this).  Returns 0 or a negative
 * error from get_chip()/wait_for_ready().
 */
static int do_write_buffer(struct map_info *map, struct flchip *chip,
			unsigned long adr, const struct kvec **pvec,
			unsigned long *pvec_seek, int len)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	map_word datum;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long prog_buf_ofs;

	wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	/* Figure out the number of words to write */
	/* word_gap: bytes by which adr precedes the next bankwidth
	 * boundary; if non-zero, align adr down and pad with 0xff. */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
	/* Write data */
	/* Get the program buffer offset from PFOW register data first*/
	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		/* n: bytes taken from the current kvec this iteration,
		 * bounded by the word remainder, the kvec and len. */
		int n = map_bankwidth(map) - word_gap;

		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		/* Pad a short trailing word with 0xff */
		if (!word_gap && (len < map_bankwidth(map)))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
				vec->iov_base + vec_seek, word_gap, n);

		len -= n;
		word_gap += n;
		/* Flush the assembled word once full or at end of data */
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, prog_buf_ofs);
			prog_buf_ofs += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
	chip->state = FL_WRITING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
	if (ret)	{
		printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
			map->name, ret, adr);
		goto out;
	}

 out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
456 
/*
 * Erase the single block containing @adr and wait for completion.
 * Returns 0 or a negative error from get_chip()/wait_for_ready().
 */
static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
	chip->state = FL_ERASING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
	if (ret)
		printk(KERN_WARNING"%s Erase block error %d at : %llx\n",
			map->name, ret, adr);
	/* the old "goto out" jumped to the very next statement anyway */
	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
483 
/*
 * MTD read: acquire the chip containing @adr, copy @len bytes into @buf
 * and release it again.  Returns 0 or a negative error from get_chip().
 *
 * NOTE(review): assumes the read does not cross a chip boundary — the
 * chip is selected from @adr only; confirm against caller constraints.
 */
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	struct flchip *chip = &lpddr->chips[adr >> lpddr->chipshift];
	int err;

	mutex_lock(&chip->mutex);
	err = get_chip(map, chip, FL_READY);
	if (!err) {
		map_copy_from(map, buf, adr, len);
		*retlen = len;
		put_chip(map, chip);
	}
	mutex_unlock(&chip->mutex);

	return err;
}
507 
/*
 * MTD point: give the caller a direct pointer into the (linear) mapping
 * for [adr, adr+len), bumping each covered chip's ref_point_counter.
 *
 * On a mid-range failure the loop stops and 0 is still returned with
 * *retlen reflecting the bytes actually pointed — partial success per
 * MTD point semantics (NOTE(review): ret from get_chip() is discarded;
 * confirm partial-success is intended here).
 */
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	/* Pointing requires a kernel-virtual mapping */
	if (!map->virt)
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);
	*mtdbuf = (void *)map->virt + chip->start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		/* Clamp this chunk to the end of the current chip */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		mutex_unlock(&chip->mutex);
		if (ret)
			break;

		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}
560 
/*
 * MTD unpoint: undo lpddr_point() for [adr, adr+len), dropping each
 * covered chip's ref_point_counter and returning the chip to FL_READY
 * once the count hits zero.  Returns 0, or -EINVAL if any covered chip
 * was not in FL_POINT state.
 */
static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift, err = 0;
	unsigned long ofs;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		/* Validate chipnum before forming a pointer into chips[]
		 * (previously the address was computed first). */
		if (chipnum >= lpddr->numchips)
			break;
		chip = &lpddr->chips[chipnum];

		/* Clamp this chunk to the end of the current chip */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_WARNING "%s: Warning: unpoint called on non"
					"pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}
605 
/*
 * MTD write: wrap the flat buffer in a single kvec and delegate to the
 * scatter/gather path.
 */
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				size_t *retlen, const u_char *buf)
{
	struct kvec vec = {
		.iov_base = (void *) buf,
		.iov_len = len,
	};

	return lpddr_writev(mtd, &vec, 1, to, retlen);
}
616 
617 
/*
 * MTD writev: program the kvec array at @to, splitting the data into
 * chunks that never cross a program-buffer (wbufsize) boundary and
 * handing each chunk to do_write_buffer(), which advances the kvec
 * cursor.  Returns 0 or the first error from do_write_buffer().
 */
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
	int chipnum = to >> lpddr->chipshift;
	unsigned long vec_seek = 0;
	unsigned long ofs = to;
	size_t remaining = 0;
	unsigned long i;
	int err;

	for (i = 0; i < count; i++)
		remaining += vecs[i].iov_len;

	if (!remaining)
		return 0;

	do {
		/* We must not cross write block boundaries */
		int chunk = wbufsize - (ofs & (wbufsize - 1));

		if (chunk > remaining)
			chunk = remaining;

		err = do_write_buffer(map, &lpddr->chips[chipnum],
					  ofs, &vecs, &vec_seek, chunk);
		if (err)
			return err;

		ofs += chunk;
		(*retlen) += chunk;
		remaining -= chunk;

		/* Be nice and reschedule with the chip in a usable
		 * state for other processes */
		cond_resched();
	} while (remaining);

	return 0;
}
664 
lpddr_erase(struct mtd_info * mtd,struct erase_info * instr)665 static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
666 {
667 	unsigned long ofs, len;
668 	int ret;
669 	struct map_info *map = mtd->priv;
670 	struct lpddr_private *lpddr = map->fldrv_priv;
671 	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
672 
673 	ofs = instr->addr;
674 	len = instr->len;
675 
676 	while (len > 0) {
677 		ret = do_erase_oneblock(mtd, ofs);
678 		if (ret)
679 			return ret;
680 		ofs += size;
681 		len -= size;
682 	}
683 
684 	return 0;
685 }
686 
687 #define DO_XXLOCK_LOCK		1
688 #define DO_XXLOCK_UNLOCK	2
do_xxlock(struct mtd_info * mtd,loff_t adr,uint32_t len,int thunk)689 static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
690 {
691 	int ret = 0;
692 	struct map_info *map = mtd->priv;
693 	struct lpddr_private *lpddr = map->fldrv_priv;
694 	int chipnum = adr >> lpddr->chipshift;
695 	struct flchip *chip = &lpddr->chips[chipnum];
696 
697 	mutex_lock(&chip->mutex);
698 	ret = get_chip(map, chip, FL_LOCKING);
699 	if (ret) {
700 		mutex_unlock(&chip->mutex);
701 		return ret;
702 	}
703 
704 	if (thunk == DO_XXLOCK_LOCK) {
705 		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
706 		chip->state = FL_LOCKING;
707 	} else if (thunk == DO_XXLOCK_UNLOCK) {
708 		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
709 		chip->state = FL_UNLOCKING;
710 	} else
711 		BUG();
712 
713 	ret = wait_for_ready(map, chip, 1);
714 	if (ret)	{
715 		printk(KERN_ERR "%s: block unlock error status %d \n",
716 				map->name, ret);
717 		goto out;
718 	}
719 out:	put_chip(map, chip);
720 	mutex_unlock(&chip->mutex);
721 	return ret;
722 }
723 
/* MTD lock: write-protect the blocks covering [ofs, ofs + len). */
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}
728 
/* MTD unlock: remove write protection from the blocks covering
 * [ofs, ofs + len). */
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
733 
734 MODULE_LICENSE("GPL");
735 MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
736 MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
737