1 /*
2  * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
3  *
4  * Copyright (C) 2002-2011 Aleph One Ltd.
5  *   for Toby Churchill Ltd and Brightstar Engineering
6  *
7  * Created by Charles Manning <charles@aleph1.co.uk>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 
14 #include "yportenv.h"
15 #include "yaffs_trace.h"
16 
17 #include "yaffs_guts.h"
18 #include "yaffs_getblockinfo.h"
19 #include "yaffs_tagscompat.h"
20 #include "yaffs_tagsmarshall.h"
21 #include "yaffs_nand.h"
22 #include "yaffs_yaffs1.h"
23 #include "yaffs_yaffs2.h"
24 #include "yaffs_bitmap.h"
25 #include "yaffs_verify.h"
26 #include "yaffs_nand.h"
27 #include "yaffs_packedtags2.h"
28 #include "yaffs_nameval.h"
29 #include "yaffs_allocator.h"
30 #include "yaffs_attribs.h"
31 #include "yaffs_summary.h"
32 
33 /* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
34 #define YAFFS_GC_GOOD_ENOUGH 2
35 #define YAFFS_GC_PASSIVE_THRESHOLD 4
36 
37 #include "yaffs_ecc.h"
38 
39 /* Forward declarations */
40 
41 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
42 			     const u8 *buffer, int n_bytes, int use_reserve);
43 
44 static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
45 				int buffer_size);
46 
47 /* Function to calculate chunk and offset */
48 
49 void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
50 				int *chunk_out, u32 *offset_out)
51 {
52 	int chunk;
53 	u32 offset;
54 
55 	chunk = (u32) (addr >> dev->chunk_shift);
56 
57 	if (dev->chunk_div == 1) {
58 		/* easy power of 2 case */
59 		offset = (u32) (addr & dev->chunk_mask);
60 	} else {
61 		/* Non power-of-2 case */
62 
63 		loff_t chunk_base;
64 
65 		chunk /= dev->chunk_div;
66 
67 		chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
68 		offset = (u32) (addr - chunk_base);
69 	}
70 
71 	*chunk_out = chunk;
72 	*offset_out = offset;
73 }
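/*
 * Illustrative example (added comment, values assumed): with 2 KiB data
 * chunks on a power-of-2 geometry (chunk_shift == 11, chunk_div == 1,
 * chunk_mask == 0x7ff), addr 0x1234 maps to chunk 2 (0x1234 >> 11) and
 * offset 0x234 (0x1234 & 0x7ff). The chunk_div path above handles chunk
 * sizes that are not a power of 2.
 */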
74 
75 /* Function to return the number of shifts for a power of 2 greater than or
76  * equal to the given number
77  * Note we don't try to cater for all possible numbers and this does not have to
78  * be hellishly efficient.
79  */
80 
81 static inline u32 calc_shifts_ceiling(u32 x)
82 {
83 	int extra_bits;
84 	int shifts;
85 
86 	shifts = extra_bits = 0;
87 
88 	while (x > 1) {
89 		if (x & 1)
90 			extra_bits++;
91 		x >>= 1;
92 		shifts++;
93 	}
94 
95 	if (extra_bits)
96 		shifts++;
97 
98 	return shifts;
99 }
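/*
 * Example (added comment): calc_shifts_ceiling(1024) == 10 because 1024 is
 * already 2^10, while calc_shifts_ceiling(1000) == 10 because the smallest
 * power of 2 at or above 1000 is 1024.
 */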
100 
101 /* Function to return the number of shifts to get a 1 in bit 0
102  */
103 
104 static inline u32 calc_shifts(u32 x)
105 {
106 	u32 shifts;
107 
108 	shifts = 0;
109 
110 	if (!x)
111 		return 0;
112 
113 	while (!(x & 1)) {
114 		x >>= 1;
115 		shifts++;
116 	}
117 
118 	return shifts;
119 }
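/*
 * Example (added comment): calc_shifts(2048) == 11 since bit 11 is the
 * lowest set bit; calc_shifts(0) is defined here to return 0.
 */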
120 
121 /*
122  * Temporary buffer manipulations.
123  */
124 
125 static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
126 {
127 	int i;
128 	u8 *buf = (u8 *) 1;
129 
130 	memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
131 
132 	for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
133 		dev->temp_buffer[i].in_use = 0;
134 		buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
135 		dev->temp_buffer[i].buffer = buf;
136 	}
137 
138 	return buf ? YAFFS_OK : YAFFS_FAIL;
139 }
140 
141 u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
142 {
143 	int i;
144 
145 	dev->temp_in_use++;
146 	if (dev->temp_in_use > dev->max_temp)
147 		dev->max_temp = dev->temp_in_use;
148 
149 	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
150 		if (dev->temp_buffer[i].in_use == 0) {
151 			dev->temp_buffer[i].in_use = 1;
152 			return dev->temp_buffer[i].buffer;
153 		}
154 	}
155 
156 	yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
157 	/*
158 	 * If we got here then we have to allocate an unmanaged one
159 	 * This is not good.
160 	 */
161 
162 	dev->unmanaged_buffer_allocs++;
163 	return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
164 
165 }
166 
167 void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
168 {
169 	int i;
170 
171 	dev->temp_in_use--;
172 
173 	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
174 		if (dev->temp_buffer[i].buffer == buffer) {
175 			dev->temp_buffer[i].in_use = 0;
176 			return;
177 		}
178 	}
179 
180 	if (buffer) {
181 		/* assume it is an unmanaged one. */
182 		yaffs_trace(YAFFS_TRACE_BUFFERS,
183 			"Releasing unmanaged temp buffer");
184 		kfree(buffer);
185 		dev->unmanaged_buffer_deallocs++;
186 	}
187 
188 }
189 
190 /*
191  * Functions for robustisizing TODO
192  *
193  */
194 
195 static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
196 				     const u8 *data,
197 				     const struct yaffs_ext_tags *tags)
198 {
199 	(void) dev;
200 	(void) nand_chunk;
201 	(void) data;
202 	(void) tags;
203 }
204 
205 static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
206 				      const struct yaffs_ext_tags *tags)
207 {
208 	(void) dev;
209 	(void) nand_chunk;
210 	(void) tags;
211 }
212 
213 void yaffs_handle_chunk_error(struct yaffs_dev *dev,
214 			      struct yaffs_block_info *bi)
215 {
216 	if (!bi->gc_prioritise) {
217 		bi->gc_prioritise = 1;
218 		dev->has_pending_prioritised_gc = 1;
219 		bi->chunk_error_strikes++;
220 
221 		if (bi->chunk_error_strikes > 3) {
222 			bi->needs_retiring = 1;	/* Too many strikes, so retire */
223 			yaffs_trace(YAFFS_TRACE_ALWAYS,
224 				"yaffs: Block struck out");
225 
226 		}
227 	}
228 }
229 
230 static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
231 					int erased_ok)
232 {
233 	int flash_block = nand_chunk / dev->param.chunks_per_block;
234 	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
235 
236 	yaffs_handle_chunk_error(dev, bi);
237 
238 	if (erased_ok) {
239 		/* Was an actual write failure,
240 		 * so mark the block for retirement.*/
241 		bi->needs_retiring = 1;
242 		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
243 		  "**>> Block %d needs retiring", flash_block);
244 	}
245 
246 	/* Delete the chunk */
247 	yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
248 	yaffs_skip_rest_of_block(dev);
249 }
250 
251 /*
252  * Verification code
253  */
254 
255 /*
256  *  Simple hash function. Needs to have a reasonable spread
257  */
258 
259 static inline int yaffs_hash_fn(int n)
260 {
261 	if (n < 0)
262 		n = -n;
263 	return n % YAFFS_NOBJECT_BUCKETS;
264 }
265 
266 /*
267  * Access functions to useful fake objects.
268  * Note that root might have a presence in NAND if permissions are set.
269  */
270 
271 struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
272 {
273 	return dev->root_dir;
274 }
275 
276 struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
277 {
278 	return dev->lost_n_found;
279 }
280 
281 /*
282  *  Erased NAND checking functions
283  */
284 
285 int yaffs_check_ff(u8 *buffer, int n_bytes)
286 {
287 	/* Horrible, slow implementation */
288 	while (n_bytes--) {
289 		if (*buffer != 0xff)
290 			return 0;
291 		buffer++;
292 	}
293 	return 1;
294 }
295 
296 static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
297 {
298 	int retval = YAFFS_OK;
299 	u8 *data = yaffs_get_temp_buffer(dev);
300 	struct yaffs_ext_tags tags;
301 	int result;
302 
303 	result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
304 
305 	if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
306 		retval = YAFFS_FAIL;
307 
308 	if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
309 		tags.chunk_used) {
310 		yaffs_trace(YAFFS_TRACE_NANDACCESS,
311 			"Chunk %d not erased", nand_chunk);
312 		retval = YAFFS_FAIL;
313 	}
314 
315 	yaffs_release_temp_buffer(dev, data);
316 
317 	return retval;
318 
319 }
320 
321 static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
322 				      int nand_chunk,
323 				      const u8 *data,
324 				      struct yaffs_ext_tags *tags)
325 {
326 	int retval = YAFFS_OK;
327 	struct yaffs_ext_tags temp_tags;
328 	u8 *buffer = yaffs_get_temp_buffer(dev);
329 	int result;
330 
331 	result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
332 	if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
333 	    temp_tags.obj_id != tags->obj_id ||
334 	    temp_tags.chunk_id != tags->chunk_id ||
335 	    temp_tags.n_bytes != tags->n_bytes)
336 		retval = YAFFS_FAIL;
337 
338 	yaffs_release_temp_buffer(dev, buffer);
339 
340 	return retval;
341 }
342 
343 
344 int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
345 {
346 	int reserved_chunks;
347 	int reserved_blocks = dev->param.n_reserved_blocks;
348 	int checkpt_blocks;
349 
350 	checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
351 
352 	reserved_chunks =
353 	    (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
354 
355 	return (dev->n_free_chunks > (reserved_chunks + n_chunks));
356 }
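/*
 * Illustrative numbers (added comment, not from the source): with
 * n_reserved_blocks == 5, two checkpoint blocks and 64 chunks per block,
 * reserved_chunks is (5 + 2) * 64 == 448, so a request for n_chunks only
 * succeeds while n_free_chunks > 448 + n_chunks.
 */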
357 
358 static int yaffs_find_alloc_block(struct yaffs_dev *dev)
359 {
360 	int i;
361 	struct yaffs_block_info *bi;
362 
363 	if (dev->n_erased_blocks < 1) {
364 		/* Hoosterman we've got a problem.
365 		 * Can't get space to gc
366 		 */
367 		yaffs_trace(YAFFS_TRACE_ERROR,
368 		  "yaffs tragedy: no more erased blocks");
369 
370 		return -1;
371 	}
372 
373 	/* Find an empty block. */
374 
375 	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
376 		dev->alloc_block_finder++;
377 		if (dev->alloc_block_finder < dev->internal_start_block
378 		    || dev->alloc_block_finder > dev->internal_end_block) {
379 			dev->alloc_block_finder = dev->internal_start_block;
380 		}
381 
382 		bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
383 
384 		if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
385 			bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
386 			dev->seq_number++;
387 			bi->seq_number = dev->seq_number;
388 			dev->n_erased_blocks--;
389 			yaffs_trace(YAFFS_TRACE_ALLOCATE,
390 			  "Allocated block %d, seq  %d, %d left" ,
391 			   dev->alloc_block_finder, dev->seq_number,
392 			   dev->n_erased_blocks);
393 			return dev->alloc_block_finder;
394 		}
395 	}
396 
397 	yaffs_trace(YAFFS_TRACE_ALWAYS,
398 		"yaffs tragedy: no more erased blocks, but there should have been %d",
399 		dev->n_erased_blocks);
400 
401 	return -1;
402 }
403 
404 static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
405 			     struct yaffs_block_info **block_ptr)
406 {
407 	int ret_val;
408 	struct yaffs_block_info *bi;
409 
410 	if (dev->alloc_block < 0) {
411 		/* Get next block to allocate off */
412 		dev->alloc_block = yaffs_find_alloc_block(dev);
413 		dev->alloc_page = 0;
414 	}
415 
416 	if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
417 		/* No space unless we're allowed to use the reserve. */
418 		return -1;
419 	}
420 
421 	if (dev->n_erased_blocks < dev->param.n_reserved_blocks
422 	    && dev->alloc_page == 0)
423 		yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
424 
425 	/* Next page please.... */
426 	if (dev->alloc_block >= 0) {
427 		bi = yaffs_get_block_info(dev, dev->alloc_block);
428 
429 		ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
430 		    dev->alloc_page;
431 		bi->pages_in_use++;
432 		yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
433 
434 		dev->alloc_page++;
435 
436 		dev->n_free_chunks--;
437 
438 		/* If the block is full set the state to full */
439 		if (dev->alloc_page >= dev->param.chunks_per_block) {
440 			bi->block_state = YAFFS_BLOCK_STATE_FULL;
441 			dev->alloc_block = -1;
442 		}
443 
444 		if (block_ptr)
445 			*block_ptr = bi;
446 
447 		return ret_val;
448 	}
449 
450 	yaffs_trace(YAFFS_TRACE_ERROR,
451 		"!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
452 
453 	return -1;
454 }
455 
456 static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
457 {
458 	int n;
459 
460 	n = dev->n_erased_blocks * dev->param.chunks_per_block;
461 
462 	if (dev->alloc_block > 0)
463 		n += (dev->param.chunks_per_block - dev->alloc_page);
464 
465 	return n;
466 
467 }
468 
469 /*
470  * yaffs_skip_rest_of_block() skips over the rest of the allocation block
471  * if we don't want to write to it.
472  */
473 void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
474 {
475 	struct yaffs_block_info *bi;
476 
477 	if (dev->alloc_block > 0) {
478 		bi = yaffs_get_block_info(dev, dev->alloc_block);
479 		if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
480 			bi->block_state = YAFFS_BLOCK_STATE_FULL;
481 			dev->alloc_block = -1;
482 		}
483 	}
484 }
485 
486 static int yaffs_write_new_chunk(struct yaffs_dev *dev,
487 				 const u8 *data,
488 				 struct yaffs_ext_tags *tags, int use_reserver)
489 {
490 	int attempts = 0;
491 	int write_ok = 0;
492 	int chunk;
493 
494 	yaffs2_checkpt_invalidate(dev);
495 
496 	do {
497 		struct yaffs_block_info *bi = 0;
498 		int erased_ok = 0;
499 
500 		chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
501 		if (chunk < 0) {
502 			/* no space */
503 			break;
504 		}
505 
506 		/* First check this chunk is erased, if it needs
507 		 * checking.  The checking policy (unless forced
508 		 * always on) is as follows:
509 		 *
510 		 * Check the first page we try to write in a block.
511 		 * If the check passes then we don't need to check any
512 		 * more.        If the check fails, we check again...
513 		 * If the block has been erased, we don't need to check.
514 		 *
515 		 * However, if the block has been prioritised for gc,
516 		 * then we think there might be something odd about
517 		 * this block and stop using it.
518 		 *
519 		 * Rationale: We should only ever see chunks that have
520 		 * not been erased if there was a partially written
521 		 * chunk due to power loss.  This checking policy should
522 		 * catch that case with very few checks and thus save a
523 		 * lot of checks that are most likely not needed.
524 		 *
525 		 * Mods to the above
526 		 * If an erase check fails or the write fails we skip the
527 		 * rest of the block.
528 		 */
529 
530 		/* let's give it a try */
531 		attempts++;
532 
533 		if (dev->param.always_check_erased)
534 			bi->skip_erased_check = 0;
535 
536 		if (!bi->skip_erased_check) {
537 			erased_ok = yaffs_check_chunk_erased(dev, chunk);
538 			if (erased_ok != YAFFS_OK) {
539 				yaffs_trace(YAFFS_TRACE_ERROR,
540 				  "**>> yaffs chunk %d was not erased",
541 				  chunk);
542 
543 				/* If not erased, delete this one,
544 				 * skip rest of block and
545 				 * try another chunk */
546 				yaffs_chunk_del(dev, chunk, 1, __LINE__);
547 				yaffs_skip_rest_of_block(dev);
548 				continue;
549 			}
550 		}
551 
552 		write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
553 
554 		if (!bi->skip_erased_check)
555 			write_ok =
556 			    yaffs_verify_chunk_written(dev, chunk, data, tags);
557 
558 		if (write_ok != YAFFS_OK) {
559 			/* Clean up aborted write, skip to next block and
560 			 * try another chunk */
561 			yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
562 			continue;
563 		}
564 
565 		bi->skip_erased_check = 1;
566 
567 		/* Copy the data into the robustification buffer */
568 		yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
569 
570 	} while (write_ok != YAFFS_OK &&
571 		 (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
572 
573 	if (!write_ok)
574 		chunk = -1;
575 
576 	if (attempts > 1) {
577 		yaffs_trace(YAFFS_TRACE_ERROR,
578 			"**>> yaffs write required %d attempts",
579 			attempts);
580 		dev->n_retried_writes += (attempts - 1);
581 	}
582 
583 	return chunk;
584 }
585 
586 /*
587  * Block retiring for handling a broken block.
588  */
589 
590 static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
591 {
592 	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
593 
594 	yaffs2_checkpt_invalidate(dev);
595 
596 	yaffs2_clear_oldest_dirty_seq(dev, bi);
597 
598 	if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
599 		if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
600 			yaffs_trace(YAFFS_TRACE_ALWAYS,
601 				"yaffs: Failed to mark bad and erase block %d",
602 				flash_block);
603 		} else {
604 			struct yaffs_ext_tags tags;
605 			int chunk_id =
606 			    flash_block * dev->param.chunks_per_block;
607 
608 			u8 *buffer = yaffs_get_temp_buffer(dev);
609 
610 			memset(buffer, 0xff, dev->data_bytes_per_chunk);
611 			memset(&tags, 0, sizeof(tags));
612 			tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
613 			if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
614 							dev->chunk_offset,
615 							buffer,
616 							&tags) != YAFFS_OK)
617 				yaffs_trace(YAFFS_TRACE_ALWAYS,
618 					"yaffs: Failed to write bad block marker to block %d",
619 					flash_block);
620 
621 			yaffs_release_temp_buffer(dev, buffer);
622 		}
623 	}
624 
625 	bi->block_state = YAFFS_BLOCK_STATE_DEAD;
626 	bi->gc_prioritise = 0;
627 	bi->needs_retiring = 0;
628 
629 	dev->n_retired_blocks++;
630 }
631 
632 /*---------------- Name handling functions ------------*/
633 
634 static u16 yaffs_calc_name_sum(const YCHAR *name)
635 {
636 	u16 sum = 0;
637 	u16 i = 1;
638 
639 	if (!name)
640 		return 0;
641 
642 	while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
643 
644 		/* 0x1f mask is case insensitive */
645 		sum += ((*name) & 0x1f) * i;
646 		i++;
647 		name++;
648 	}
649 	return sum;
650 }
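/*
 * Example (added comment): because of the 0x1f mask, "Foo" and "foo" give
 * the same sum ('F' & 0x1f == 'f' & 0x1f == 6), so the sum works as a cheap
 * case-insensitive pre-filter when comparing names.
 */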
651 
652 
653 void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
654 {
655 	memset(obj->short_name, 0, sizeof(obj->short_name));
656 
657 	if (name && !name[0]) {
658 		yaffs_fix_null_name(obj, obj->short_name,
659 				YAFFS_SHORT_NAME_LENGTH);
660 		name = obj->short_name;
661 	} else if (name &&
662 		strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
663 		YAFFS_SHORT_NAME_LENGTH)  {
664 		strcpy(obj->short_name, name);
665 	}
666 
667 	obj->sum = yaffs_calc_name_sum(name);
668 }
669 
670 void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
671 				const struct yaffs_obj_hdr *oh)
672 {
673 #ifdef CONFIG_YAFFS_AUTO_UNICODE
674 	YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
675 	memset(tmp_name, 0, sizeof(tmp_name));
676 	yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
677 				YAFFS_MAX_NAME_LENGTH + 1);
678 	yaffs_set_obj_name(obj, tmp_name);
679 #else
680 	yaffs_set_obj_name(obj, oh->name);
681 #endif
682 }
683 
684 loff_t yaffs_max_file_size(struct yaffs_dev *dev)
685 {
686 	if(sizeof(loff_t) < 8)
687 		return YAFFS_MAX_FILE_SIZE_32;
688 	else
689 		return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
690 }
691 
692 /*-------------------- TNODES -------------------
693 
694  * List of spare tnodes
695  * The list is hooked together using the first pointer
696  * in the tnode.
697  */
698 
699 struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
700 {
701 	struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
702 
703 	if (tn) {
704 		memset(tn, 0, dev->tnode_size);
705 		dev->n_tnodes++;
706 	}
707 
708 	dev->checkpoint_blocks_required = 0;	/* force recalculation */
709 
710 	return tn;
711 }
712 
713 /* FreeTnode frees up a tnode and puts it back on the free list */
714 static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
715 {
716 	yaffs_free_raw_tnode(dev, tn);
717 	dev->n_tnodes--;
718 	dev->checkpoint_blocks_required = 0;	/* force recalculation */
719 }
720 
721 static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
722 {
723 	yaffs_deinit_raw_tnodes_and_objs(dev);
724 	dev->n_obj = 0;
725 	dev->n_tnodes = 0;
726 }
727 
728 static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
729 			unsigned pos, unsigned val)
730 {
731 	u32 *map = (u32 *) tn;
732 	u32 bit_in_map;
733 	u32 bit_in_word;
734 	u32 word_in_map;
735 	u32 mask;
736 
737 	pos &= YAFFS_TNODES_LEVEL0_MASK;
738 	val >>= dev->chunk_grp_bits;
739 
740 	bit_in_map = pos * dev->tnode_width;
741 	word_in_map = bit_in_map / 32;
742 	bit_in_word = bit_in_map & (32 - 1);
743 
744 	mask = dev->tnode_mask << bit_in_word;
745 
746 	map[word_in_map] &= ~mask;
747 	map[word_in_map] |= (mask & (val << bit_in_word));
748 
749 	if (dev->tnode_width > (32 - bit_in_word)) {
750 		bit_in_word = (32 - bit_in_word);
751 		word_in_map++;
752 		mask =
753 		    dev->tnode_mask >> bit_in_word;
754 		map[word_in_map] &= ~mask;
755 		map[word_in_map] |= (mask & (val >> bit_in_word));
756 	}
757 }
758 
759 u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
760 			 unsigned pos)
761 {
762 	u32 *map = (u32 *) tn;
763 	u32 bit_in_map;
764 	u32 bit_in_word;
765 	u32 word_in_map;
766 	u32 val;
767 
768 	pos &= YAFFS_TNODES_LEVEL0_MASK;
769 
770 	bit_in_map = pos * dev->tnode_width;
771 	word_in_map = bit_in_map / 32;
772 	bit_in_word = bit_in_map & (32 - 1);
773 
774 	val = map[word_in_map] >> bit_in_word;
775 
776 	if (dev->tnode_width > (32 - bit_in_word)) {
777 		bit_in_word = (32 - bit_in_word);
778 		word_in_map++;
779 		val |= (map[word_in_map] << bit_in_word);
780 	}
781 
782 	val &= dev->tnode_mask;
783 	val <<= dev->chunk_grp_bits;
784 
785 	return val;
786 }
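/*
 * Packing example (added comment, width assumed for illustration): with
 * tnode_width == 20 bits, entry 3 starts at bit 60, i.e. bit 28 of word 1,
 * so its low 4 bits sit in word 1 and the remaining 16 bits spill into
 * word 2; that is why both the store (yaffs_load_tnode_0) and load
 * (yaffs_get_group_base) paths above handle the word-straddling case.
 */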
787 
788 /* ------------------- End of individual tnode manipulation -----------------*/
789 
790 /* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
791  * The look up tree is represented by the top tnode and the number of top_level
792  * in the tree. 0 means only the level 0 tnode is in the tree.
793  */
794 
795 /* FindLevel0Tnode finds the level 0 tnode, if one exists. */
796 struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
797 				       struct yaffs_file_var *file_struct,
798 				       u32 chunk_id)
799 {
800 	struct yaffs_tnode *tn = file_struct->top;
801 	u32 i;
802 	int required_depth;
803 	int level = file_struct->top_level;
804 
805 	(void) dev;
806 
807 	/* Check sane level and chunk Id */
808 	if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
809 		return NULL;
810 
811 	if (chunk_id > YAFFS_MAX_CHUNK_ID)
812 		return NULL;
813 
814 	/* First check we're tall enough (ie enough top_level) */
815 
816 	i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
817 	required_depth = 0;
818 	while (i) {
819 		i >>= YAFFS_TNODES_INTERNAL_BITS;
820 		required_depth++;
821 	}
822 
823 	if (required_depth > file_struct->top_level)
824 		return NULL;	/* Not tall enough, so we can't find it */
825 
826 	/* Traverse down to level 0 */
827 	while (level > 0 && tn) {
828 		tn = tn->internal[(chunk_id >>
829 				   (YAFFS_TNODES_LEVEL0_BITS +
830 				    (level - 1) *
831 				    YAFFS_TNODES_INTERNAL_BITS)) &
832 				  YAFFS_TNODES_INTERNAL_MASK];
833 		level--;
834 	}
835 
836 	return tn;
837 }
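/*
 * Worked example (added comment, assuming the usual 16-entry level 0 and
 * 8-way internal tnodes, i.e. YAFFS_TNODES_LEVEL0_BITS == 4 and
 * YAFFS_TNODES_INTERNAL_BITS == 3): for chunk_id 100, 100 >> 4 == 6 and
 * 6 >> 3 == 0, so required_depth is 1 and the lookup only succeeds when
 * top_level is at least 1.
 */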
838 
839 /* add_find_tnode_0 finds the level 0 tnode if it exists,
840  * otherwise first expands the tree.
841  * This happens in two steps:
842  *  1. If the tree isn't tall enough, then make it taller.
843  *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
844  *
845  * Used when modifying the tree.
846  *
847  *  If the tn argument is NULL, then a fresh tnode will be added otherwise the
848  *  specified tn will be plugged into the tree.
849  */
850 
851 struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
852 					   struct yaffs_file_var *file_struct,
853 					   u32 chunk_id,
854 					   struct yaffs_tnode *passed_tn)
855 {
856 	int required_depth;
857 	int i;
858 	int l;
859 	struct yaffs_tnode *tn;
860 	u32 x;
861 
862 	/* Check sane level and page Id */
863 	if (file_struct->top_level < 0 ||
864 	    file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
865 		return NULL;
866 
867 	if (chunk_id > YAFFS_MAX_CHUNK_ID)
868 		return NULL;
869 
870 	/* First check we're tall enough (ie enough top_level) */
871 
872 	x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
873 	required_depth = 0;
874 	while (x) {
875 		x >>= YAFFS_TNODES_INTERNAL_BITS;
876 		required_depth++;
877 	}
878 
879 	if (required_depth > file_struct->top_level) {
880 		/* Not tall enough, gotta make the tree taller */
881 		for (i = file_struct->top_level; i < required_depth; i++) {
882 
883 			tn = yaffs_get_tnode(dev);
884 
885 			if (tn) {
886 				tn->internal[0] = file_struct->top;
887 				file_struct->top = tn;
888 				file_struct->top_level++;
889 			} else {
890 				yaffs_trace(YAFFS_TRACE_ERROR,
891 					"yaffs: no more tnodes");
892 				return NULL;
893 			}
894 		}
895 	}
896 
897 	/* Traverse down to level 0, adding anything we need */
898 
899 	l = file_struct->top_level;
900 	tn = file_struct->top;
901 
902 	if (l > 0) {
903 		while (l > 0 && tn) {
904 			x = (chunk_id >>
905 			     (YAFFS_TNODES_LEVEL0_BITS +
906 			      (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
907 			    YAFFS_TNODES_INTERNAL_MASK;
908 
909 			if ((l > 1) && !tn->internal[x]) {
910 				/* Add missing non-level-zero tnode */
911 				tn->internal[x] = yaffs_get_tnode(dev);
912 				if (!tn->internal[x])
913 					return NULL;
914 			} else if (l == 1) {
915 				/* Looking from level 1 at level 0 */
916 				if (passed_tn) {
917 					/* If we already have one, release it */
918 					if (tn->internal[x])
919 						yaffs_free_tnode(dev,
920 							tn->internal[x]);
921 					tn->internal[x] = passed_tn;
922 
923 				} else if (!tn->internal[x]) {
924 					/* Don't have one, none passed in */
925 					tn->internal[x] = yaffs_get_tnode(dev);
926 					if (!tn->internal[x])
927 						return NULL;
928 				}
929 			}
930 
931 			tn = tn->internal[x];
932 			l--;
933 		}
934 	} else {
935 		/* top is level 0 */
936 		if (passed_tn) {
937 			memcpy(tn, passed_tn,
938 			       (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
939 			yaffs_free_tnode(dev, passed_tn);
940 		}
941 	}
942 
943 	return tn;
944 }
945 
946 static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
947 			    int chunk_obj)
948 {
949 	return (tags->chunk_id == chunk_obj &&
950 		tags->obj_id == obj_id &&
951 		!tags->is_deleted) ? 1 : 0;
952 
953 }
954 
955 static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
956 					struct yaffs_ext_tags *tags, int obj_id,
957 					int inode_chunk)
958 {
959 	int j;
960 
961 	for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
962 		if (yaffs_check_chunk_bit
963 		    (dev, the_chunk / dev->param.chunks_per_block,
964 		     the_chunk % dev->param.chunks_per_block)) {
965 
966 			if (dev->chunk_grp_size == 1)
967 				return the_chunk;
968 			else {
969 				yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
970 							 tags);
971 				if (yaffs_tags_match(tags,
972 							obj_id, inode_chunk)) {
973 					/* found it; */
974 					return the_chunk;
975 				}
976 			}
977 		}
978 		the_chunk++;
979 	}
980 	return -1;
981 }
982 
983 static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
984 				    struct yaffs_ext_tags *tags)
985 {
986 	/* Get the Tnode, then get the level 0 chunk offset */
987 	struct yaffs_tnode *tn;
988 	int the_chunk = -1;
989 	struct yaffs_ext_tags local_tags;
990 	int ret_val = -1;
991 	struct yaffs_dev *dev = in->my_dev;
992 
993 	if (!tags) {
994 		/* Passed a NULL, so use our own tags space */
995 		tags = &local_tags;
996 	}
997 
998 	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
999 
1000 	if (!tn)
1001 		return ret_val;
1002 
1003 	the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1004 
1005 	ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1006 					      inode_chunk);
1007 	return ret_val;
1008 }
1009 
1010 static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
1011 				     struct yaffs_ext_tags *tags)
1012 {
1013 	/* Get the Tnode, then get the level 0 offset chunk offset */
1014 	struct yaffs_tnode *tn;
1015 	int the_chunk = -1;
1016 	struct yaffs_ext_tags local_tags;
1017 	struct yaffs_dev *dev = in->my_dev;
1018 	int ret_val = -1;
1019 
1020 	if (!tags) {
1021 		/* Passed a NULL, so use our own tags space */
1022 		tags = &local_tags;
1023 	}
1024 
1025 	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1026 
1027 	if (!tn)
1028 		return ret_val;
1029 
1030 	the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1031 
1032 	ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1033 					      inode_chunk);
1034 
1035 	/* Delete the entry in the file structure (if found) */
1036 	if (ret_val != -1)
1037 		yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
1038 
1039 	return ret_val;
1040 }
1041 
1042 int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
1043 			    int nand_chunk, int in_scan)
1044 {
1045 	/* NB in_scan is zero unless scanning.
1046 	 * For forward scanning, in_scan is > 0;
1047 	 * for backward scanning in_scan is < 0
1048 	 *
1049 	 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
1050 	 */
1051 
1052 	struct yaffs_tnode *tn;
1053 	struct yaffs_dev *dev = in->my_dev;
1054 	int existing_cunk;
1055 	struct yaffs_ext_tags existing_tags;
1056 	struct yaffs_ext_tags new_tags;
1057 	unsigned existing_serial, new_serial;
1058 
1059 	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
1060 		/* Just ignore an attempt at putting a chunk into a non-file
1061 		 * during scanning.
1062 		 * If it is not during Scanning then something went wrong!
1063 		 */
1064 		if (!in_scan) {
1065 			yaffs_trace(YAFFS_TRACE_ERROR,
1066 				"yaffs tragedy: attempt to put data chunk into a non-file"
1067 				);
1068 			BUG();
1069 		}
1070 
1071 		yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1072 		return YAFFS_OK;
1073 	}
1074 
1075 	tn = yaffs_add_find_tnode_0(dev,
1076 				    &in->variant.file_variant,
1077 				    inode_chunk, NULL);
1078 	if (!tn)
1079 		return YAFFS_FAIL;
1080 
1081 	if (!nand_chunk)
1082 		/* Dummy insert, bail now */
1083 		return YAFFS_OK;
1084 
1085 	existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
1086 
1087 	if (in_scan != 0) {
1088 		/* If we're scanning then we need to test for duplicates
1089 		 * NB This does not need to be efficient since it should only
1090 		 * happen when the power fails during a write, then only one
1091 		 * chunk should ever be affected.
1092 		 *
1093 		 * Correction for YAFFS2: This could happen quite a lot and we
1094 		 * need to think about efficiency! TODO
1095 		 * Update: For backward scanning we don't need to re-read tags
1096 		 * so this is quite cheap.
1097 		 */
1098 
1099 		if (existing_cunk > 0) {
1100 			/* NB Right now existing chunk will not be real
1101 			 * chunk_id if the chunk group size > 1
1102 			 * thus we have to do a FindChunkInFile to get the
1103 			 * real chunk id.
1104 			 *
1105 			 * We have a duplicate now we need to decide which
1106 			 * one to use:
1107 			 *
1108 			 * Backwards scanning YAFFS2: The old one is what
1109 			 * we use, dump the new one.
1110 			 * YAFFS1: Get both sets of tags and compare serial
1111 			 * numbers.
1112 			 */
1113 
1114 			if (in_scan > 0) {
1115 				/* Only do this for forward scanning */
1116 				yaffs_rd_chunk_tags_nand(dev,
1117 							 nand_chunk,
1118 							 NULL, &new_tags);
1119 
1120 				/* Do a proper find */
1121 				existing_cunk =
1122 				    yaffs_find_chunk_in_file(in, inode_chunk,
1123 							     &existing_tags);
1124 			}
1125 
1126 			if (existing_cunk <= 0) {
1127 				/*Hoosterman - how did this happen? */
1128 
1129 				yaffs_trace(YAFFS_TRACE_ERROR,
1130 					"yaffs tragedy: existing chunk < 0 in scan"
1131 					);
1132 
1133 			}
1134 
1135 			/* NB The deleted flags should be false, otherwise
1136 			 * the chunks will not be loaded during a scan
1137 			 */
1138 
1139 			if (in_scan > 0) {
1140 				new_serial = new_tags.serial_number;
1141 				existing_serial = existing_tags.serial_number;
1142 			}
1143 
1144 			if ((in_scan > 0) &&
1145 			    (existing_cunk <= 0 ||
1146 			     ((existing_serial + 1) & 3) == new_serial)) {
1147 				/* Forward scanning.
1148 				 * Use new
1149 				 * Delete the old one and drop through to
1150 				 * update the tnode
1151 				 */
1152 				yaffs_chunk_del(dev, existing_cunk, 1,
1153 						__LINE__);
1154 			} else {
1155 				/* Backward scanning or we want to use the
1156 				 * existing one
1157 				 * Delete the new one and return early so that
1158 				 * the tnode isn't changed
1159 				 */
1160 				yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1161 				return YAFFS_OK;
1162 			}
1163 		}
1164 
1165 	}
1166 
1167 	if (existing_cunk == 0)
1168 		in->n_data_chunks++;
1169 
1170 	yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
1171 
1172 	return YAFFS_OK;
1173 }
1174 
1175 static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
1176 {
1177 	struct yaffs_block_info *the_block;
1178 	unsigned block_no;
1179 
1180 	yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
1181 
1182 	block_no = chunk / dev->param.chunks_per_block;
1183 	the_block = yaffs_get_block_info(dev, block_no);
1184 	if (the_block) {
1185 		the_block->soft_del_pages++;
1186 		dev->n_free_chunks++;
1187 		yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
1188 	}
1189 }
1190 
1191 /* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
1192  * the chunks in the file.
1193  * All soft deleting does is increment the block's softdelete count and pulls
1194  * the chunk out of the tnode.
1195  * Thus, essentially this is the same as DeleteWorker except that the chunks
1196  * are soft deleted.
1197  */
1198 
1199 static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
1200 				 u32 level, int chunk_offset)
1201 {
1202 	int i;
1203 	int the_chunk;
1204 	int all_done = 1;
1205 	struct yaffs_dev *dev = in->my_dev;
1206 
1207 	if (!tn)
1208 		return 1;
1209 
1210 	if (level > 0) {
1211 		for (i = YAFFS_NTNODES_INTERNAL - 1;
1212 			all_done && i >= 0;
1213 			i--) {
1214 			if (tn->internal[i]) {
1215 				all_done =
1216 				    yaffs_soft_del_worker(in,
1217 					tn->internal[i],
1218 					level - 1,
1219 					(chunk_offset <<
1220 					YAFFS_TNODES_INTERNAL_BITS)
1221 					+ i);
1222 				if (all_done) {
1223 					yaffs_free_tnode(dev,
1224 						tn->internal[i]);
1225 					tn->internal[i] = NULL;
1226 				} else {
1227 					/* Can this happen? */
1228 				}
1229 			}
1230 		}
1231 		return (all_done) ? 1 : 0;
1232 	}
1233 
1234 	/* level 0 */
1235 	 for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
1236 		the_chunk = yaffs_get_group_base(dev, tn, i);
1237 		if (the_chunk) {
1238 			yaffs_soft_del_chunk(dev, the_chunk);
1239 			yaffs_load_tnode_0(dev, tn, i, 0);
1240 		}
1241 	}
1242 	return 1;
1243 }
1244 
1245 static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
1246 {
1247 	struct yaffs_dev *dev = obj->my_dev;
1248 	struct yaffs_obj *parent;
1249 
1250 	yaffs_verify_obj_in_dir(obj);
1251 	parent = obj->parent;
1252 
1253 	yaffs_verify_dir(parent);
1254 
1255 	if (dev && dev->param.remove_obj_fn)
1256 		dev->param.remove_obj_fn(obj);
1257 
1258 	list_del_init(&obj->siblings);
1259 	obj->parent = NULL;
1260 
1261 	yaffs_verify_dir(parent);
1262 }
1263 
1264 void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
1265 {
1266 	if (!directory) {
1267 		yaffs_trace(YAFFS_TRACE_ALWAYS,
1268 			"tragedy: Trying to add an object to a null pointer directory"
1269 			);
1270 		BUG();
1271 		return;
1272 	}
1273 	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1274 		yaffs_trace(YAFFS_TRACE_ALWAYS,
1275 			"tragedy: Trying to add an object to a non-directory"
1276 			);
1277 		BUG();
1278 	}
1279 
1280 	if (obj->siblings.prev == NULL) {
1281 		/* Not initialised */
1282 		BUG();
1283 	}
1284 
1285 	yaffs_verify_dir(directory);
1286 
1287 	yaffs_remove_obj_from_dir(obj);
1288 
1289 	/* Now add it */
1290 	list_add(&obj->siblings, &directory->variant.dir_variant.children);
1291 	obj->parent = directory;
1292 
1293 	if (directory == obj->my_dev->unlinked_dir
1294 	    || directory == obj->my_dev->del_dir) {
1295 		obj->unlinked = 1;
1296 		obj->my_dev->n_unlinked_files++;
1297 		obj->rename_allowed = 0;
1298 	}
1299 
1300 	yaffs_verify_dir(directory);
1301 	yaffs_verify_obj_in_dir(obj);
1302 }
1303 
1304 static int yaffs_change_obj_name(struct yaffs_obj *obj,
1305 				 struct yaffs_obj *new_dir,
1306 				 const YCHAR *new_name, int force, int shadows)
1307 {
1308 	int unlink_op;
1309 	int del_op;
1310 	struct yaffs_obj *existing_target;
1311 
1312 	if (new_dir == NULL)
1313 		new_dir = obj->parent;	/* use the old directory */
1314 
1315 	if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1316 		yaffs_trace(YAFFS_TRACE_ALWAYS,
1317 			"tragedy: yaffs_change_obj_name: new_dir is not a directory"
1318 			);
1319 		BUG();
1320 	}
1321 
1322 	unlink_op = (new_dir == obj->my_dev->unlinked_dir);
1323 	del_op = (new_dir == obj->my_dev->del_dir);
1324 
1325 	existing_target = yaffs_find_by_name(new_dir, new_name);
1326 
1327 	/* If the object is a file going into the unlinked directory,
1328 	 *   then it is OK to just stuff it in since duplicate names are OK.
1329 	 *   else only proceed if the new name does not exist and we're putting
1330 	 *   it into a directory.
1331 	 */
1332 	if (!(unlink_op || del_op || force ||
1333 	      shadows > 0 || !existing_target) ||
1334 	      new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
1335 		return YAFFS_FAIL;
1336 
1337 	yaffs_set_obj_name(obj, new_name);
1338 	obj->dirty = 1;
1339 	yaffs_add_obj_to_dir(new_dir, obj);
1340 
1341 	if (unlink_op)
1342 		obj->unlinked = 1;
1343 
1344 	/* If it is a deletion then we mark it as a shrink for gc  */
1345 	if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
1346 		return YAFFS_OK;
1347 
1348 	return YAFFS_FAIL;
1349 }
1350 
1351 /*------------------------ Short Operations Cache ------------------------------
1352  *   In many situations where there is no high level buffering  a lot of
1353  *   reads might be short sequential reads, and a lot of writes may be short
1354  *   sequential writes. eg. scanning/writing a jpeg file.
1355  *   In these cases, a short read/write cache can provide a huge performance
1356  *   benefit with dumb-as-a-rock code.
1357  *   In Linux, the page cache provides read buffering and the short op cache
1358  *   provides write buffering.
1359  *
1360  *   There are a small number (~10) of cache chunks per device so that we don't
1361  *   need a very intelligent search.
1362  */
1363 
1364 static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
1365 {
1366 	struct yaffs_dev *dev = obj->my_dev;
1367 	int i;
1368 	struct yaffs_cache *cache;
1369 	int n_caches = obj->my_dev->param.n_caches;
1370 
1371 	for (i = 0; i < n_caches; i++) {
1372 		cache = &dev->cache[i];
1373 		if (cache->object == obj && cache->dirty)
1374 			return 1;
1375 	}
1376 
1377 	return 0;
1378 }
1379 
1380 static void yaffs_flush_file_cache(struct yaffs_obj *obj)
1381 {
1382 	struct yaffs_dev *dev = obj->my_dev;
1383 	int lowest = -99;	/* Stop compiler whining. */
1384 	int i;
1385 	struct yaffs_cache *cache;
1386 	int chunk_written = 0;
1387 	int n_caches = obj->my_dev->param.n_caches;
1388 
1389 	if (n_caches < 1)
1390 		return;
1391 	do {
1392 		cache = NULL;
1393 
1394 		/* Find the lowest dirty chunk for this object */
1395 		for (i = 0; i < n_caches; i++) {
1396 			if (dev->cache[i].object == obj &&
1397 			    dev->cache[i].dirty) {
1398 				if (!cache ||
1399 				    dev->cache[i].chunk_id < lowest) {
1400 					cache = &dev->cache[i];
1401 					lowest = cache->chunk_id;
1402 				}
1403 			}
1404 		}
1405 
1406 		if (cache && !cache->locked) {
1407 			/* Write it out and free it up */
1408 			chunk_written =
1409 			    yaffs_wr_data_obj(cache->object,
1410 					      cache->chunk_id,
1411 					      cache->data,
1412 					      cache->n_bytes, 1);
1413 			cache->dirty = 0;
1414 			cache->object = NULL;
1415 		}
1416 	} while (cache && chunk_written > 0);
1417 
1418 	if (cache)
1419 		/* Hoosterman, disk full while writing cache out. */
1420 		yaffs_trace(YAFFS_TRACE_ERROR,
1421 			"yaffs tragedy: no space during cache write");
1422 }
1423 
1424 /* yaffs_flush_whole_cache(dev)
1425  *
1426  * Flush every dirty short-op cache entry on the device, object by object.
1427  */
1428 
1429 void yaffs_flush_whole_cache(struct yaffs_dev *dev)
1430 {
1431 	struct yaffs_obj *obj;
1432 	int n_caches = dev->param.n_caches;
1433 	int i;
1434 
1435 	/* Find a dirty object in the cache and flush it...
1436 	 * until there are no further dirty objects.
1437 	 */
1438 	do {
1439 		obj = NULL;
1440 		for (i = 0; i < n_caches && !obj; i++) {
1441 			if (dev->cache[i].object && dev->cache[i].dirty)
1442 				obj = dev->cache[i].object;
1443 		}
1444 		if (obj)
1445 			yaffs_flush_file_cache(obj);
1446 	} while (obj);
1447 
1448 }
1449 
1450 /* Grab us a cache chunk for use.
1451  * First look for an empty one.
1452  * Then look for the least recently used non-dirty one.
1453  * Then look for the least recently used dirty one...., flush and look again.
1454  */
1455 static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
1456 {
1457 	int i;
1458 
1459 	if (dev->param.n_caches > 0) {
1460 		for (i = 0; i < dev->param.n_caches; i++) {
1461 			if (!dev->cache[i].object)
1462 				return &dev->cache[i];
1463 		}
1464 	}
1465 	return NULL;
1466 }
1467 
1468 static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
1469 {
1470 	struct yaffs_cache *cache;
1471 	struct yaffs_obj *the_obj;
1472 	int usage;
1473 	int i;
1474 	int pushout;
1475 
1476 	if (dev->param.n_caches < 1)
1477 		return NULL;
1478 
1479 	/* Try find a non-dirty one... */
1480 
1481 	cache = yaffs_grab_chunk_worker(dev);
1482 
1483 	if (!cache) {
1484 		/* They were all dirty, find the LRU object and flush
1485 		 * its cache, then  find again.
1486 		 * NB what's here is not very accurate,
1487 		 * we actually flush the object with the LRU chunk.
1488 		 */
1489 
1490 		/* With locking we can't assume we can use entry zero,
1491 		 * Set the_obj to a valid pointer for Coverity. */
1492 		the_obj = dev->cache[0].object;
1493 		usage = -1;
1494 		cache = NULL;
1495 		pushout = -1;
1496 
1497 		for (i = 0; i < dev->param.n_caches; i++) {
1498 			if (dev->cache[i].object &&
1499 			    !dev->cache[i].locked &&
1500 			    (dev->cache[i].last_use < usage ||
1501 			    !cache)) {
1502 				usage = dev->cache[i].last_use;
1503 				the_obj = dev->cache[i].object;
1504 				cache = &dev->cache[i];
1505 				pushout = i;
1506 			}
1507 		}
1508 
1509 		if (!cache || cache->dirty) {
1510 			/* Flush and try again */
1511 			yaffs_flush_file_cache(the_obj);
1512 			cache = yaffs_grab_chunk_worker(dev);
1513 		}
1514 	}
1515 	return cache;
1516 }
1517 
1518 /* Find a cached chunk */
1519 static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
1520 						  int chunk_id)
1521 {
1522 	struct yaffs_dev *dev = obj->my_dev;
1523 	int i;
1524 
1525 	if (dev->param.n_caches < 1)
1526 		return NULL;
1527 
1528 	for (i = 0; i < dev->param.n_caches; i++) {
1529 		if (dev->cache[i].object == obj &&
1530 		    dev->cache[i].chunk_id == chunk_id) {
1531 			dev->cache_hits++;
1532 
1533 			return &dev->cache[i];
1534 		}
1535 	}
1536 	return NULL;
1537 }
1538 
1539 /* Mark the chunk for the least recently used algorithm */
1540 static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
1541 			    int is_write)
1542 {
1543 	int i;
1544 
1545 	if (dev->param.n_caches < 1)
1546 		return;
1547 
1548 	if (dev->cache_last_use < 0 ||
1549 		dev->cache_last_use > 100000000) {
1550 		/* Reset the cache usages */
1551 		for (i = 1; i < dev->param.n_caches; i++)
1552 			dev->cache[i].last_use = 0;
1553 
1554 		dev->cache_last_use = 0;
1555 	}
1556 	dev->cache_last_use++;
1557 	cache->last_use = dev->cache_last_use;
1558 
1559 	if (is_write)
1560 		cache->dirty = 1;
1561 }
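/*
 * Note (added comment): last_use is a single global counter, so it is reset
 * once it exceeds 100000000 (or goes negative) to keep the LRU comparison in
 * yaffs_grab_chunk_cache() from being confused by wrap-around; recency order
 * is then rebuilt as cache entries are touched again.
 */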
1562 
1563 /* Invalidate a single cache page.
1564  * Do this when a whole page gets written,
1565  * ie the short cache for this page is no longer valid.
1566  */
1567 static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
1568 {
1569 	struct yaffs_cache *cache;
1570 
1571 	if (object->my_dev->param.n_caches > 0) {
1572 		cache = yaffs_find_chunk_cache(object, chunk_id);
1573 
1574 		if (cache)
1575 			cache->object = NULL;
1576 	}
1577 }
1578 
1579 /* Invalidate all the cache pages associated with this object
1580  * Do this whenever the file is deleted or resized.
1581  */
1582 static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
1583 {
1584 	int i;
1585 	struct yaffs_dev *dev = in->my_dev;
1586 
1587 	if (dev->param.n_caches > 0) {
1588 		/* Invalidate it. */
1589 		for (i = 0; i < dev->param.n_caches; i++) {
1590 			if (dev->cache[i].object == in)
1591 				dev->cache[i].object = NULL;
1592 		}
1593 	}
1594 }
1595 
1596 static void yaffs_unhash_obj(struct yaffs_obj *obj)
1597 {
1598 	int bucket;
1599 	struct yaffs_dev *dev = obj->my_dev;
1600 
1601 	/* If it is still linked into the bucket list, free from the list */
1602 	if (!list_empty(&obj->hash_link)) {
1603 		list_del_init(&obj->hash_link);
1604 		bucket = yaffs_hash_fn(obj->obj_id);
1605 		dev->obj_bucket[bucket].count--;
1606 	}
1607 }
1608 
1609 /*  FreeObject frees up an Object and puts it back on the free list */
1610 static void yaffs_free_obj(struct yaffs_obj *obj)
1611 {
1612 	struct yaffs_dev *dev;
1613 
1614 	if (!obj) {
1615 		BUG();
1616 		return;
1617 	}
1618 	dev = obj->my_dev;
1619 	yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
1620 		obj, obj->my_inode);
1621 	if (obj->parent)
1622 		BUG();
1623 	if (!list_empty(&obj->siblings))
1624 		BUG();
1625 
1626 	if (obj->my_inode) {
1627 		/* We're still hooked up to a cached inode.
1628 		 * Don't delete now, but mark for later deletion
1629 		 */
1630 		obj->defered_free = 1;
1631 		return;
1632 	}
1633 
1634 	yaffs_unhash_obj(obj);
1635 
1636 	yaffs_free_raw_obj(dev, obj);
1637 	dev->n_obj--;
1638 	dev->checkpoint_blocks_required = 0;	/* force recalculation */
1639 }
1640 
1641 void yaffs_handle_defered_free(struct yaffs_obj *obj)
1642 {
1643 	if (obj->defered_free)
1644 		yaffs_free_obj(obj);
1645 }
1646 
1647 static int yaffs_generic_obj_del(struct yaffs_obj *in)
1648 {
1649 	/* Invalidate the file's data in the cache, without flushing. */
1650 	yaffs_invalidate_whole_cache(in);
1651 
1652 	if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
1653 		/* Move to unlinked directory so we have a deletion record */
1654 		yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
1655 				      0);
1656 	}
1657 
1658 	yaffs_remove_obj_from_dir(in);
1659 	yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
1660 	in->hdr_chunk = 0;
1661 
1662 	yaffs_free_obj(in);
1663 	return YAFFS_OK;
1664 
1665 }
1666 
1667 static void yaffs_soft_del_file(struct yaffs_obj *obj)
1668 {
1669 	if (!obj->deleted ||
1670 	    obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
1671 	    obj->soft_del)
1672 		return;
1673 
1674 	if (obj->n_data_chunks <= 0) {
1675 		/* Empty file with no duplicate object headers,
1676 		 * just delete it immediately */
1677 		yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
1678 		obj->variant.file_variant.top = NULL;
1679 		yaffs_trace(YAFFS_TRACE_TRACING,
1680 			"yaffs: Deleting empty file %d",
1681 			obj->obj_id);
1682 		yaffs_generic_obj_del(obj);
1683 	} else {
1684 		yaffs_soft_del_worker(obj,
1685 				      obj->variant.file_variant.top,
1686 				      obj->variant.
1687 				      file_variant.top_level, 0);
1688 		obj->soft_del = 1;
1689 	}
1690 }
1691 
1692 /* Pruning removes any part of the file structure tree that is beyond the
1693  * bounds of the file (ie that does not point to chunks).
1694  *
1695  * A file should only get pruned when its size is reduced.
1696  *
1697  * Before pruning, the chunks must be pulled from the tree and the
1698  * level 0 tnode entries must be zeroed out.
1699  * Could also use this for file deletion, but that's probably better handled
1700  * by a special case.
1701  *
1702  * This function is recursive. For levels > 0 the function is called again on
1703  * any sub-tree. For level == 0 we just check if the sub-tree has data.
1704  * If there is no data in a subtree then it is pruned.
1705  */
1706 
1707 static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
1708 					      struct yaffs_tnode *tn, u32 level,
1709 					      int del0)
1710 {
1711 	int i;
1712 	int has_data;
1713 
1714 	if (!tn)
1715 		return tn;
1716 
1717 	has_data = 0;
1718 
1719 	if (level > 0) {
1720 		for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
1721 			if (tn->internal[i]) {
1722 				tn->internal[i] =
1723 				    yaffs_prune_worker(dev,
1724 						tn->internal[i],
1725 						level - 1,
1726 						(i == 0) ? del0 : 1);
1727 			}
1728 
1729 			if (tn->internal[i])
1730 				has_data++;
1731 		}
1732 	} else {
1733 		int tnode_size_u32 = dev->tnode_size / sizeof(u32);
1734 		u32 *map = (u32 *) tn;
1735 
1736 		for (i = 0; !has_data && i < tnode_size_u32; i++) {
1737 			if (map[i])
1738 				has_data++;
1739 		}
1740 	}
1741 
1742 	if (has_data == 0 && del0) {
1743 		/* Free and return NULL */
1744 		yaffs_free_tnode(dev, tn);
1745 		tn = NULL;
1746 	}
1747 	return tn;
1748 }
1749 
1750 static int yaffs_prune_tree(struct yaffs_dev *dev,
1751 			    struct yaffs_file_var *file_struct)
1752 {
1753 	int i;
1754 	int has_data;
1755 	int done = 0;
1756 	struct yaffs_tnode *tn;
1757 
1758 	if (file_struct->top_level < 1)
1759 		return YAFFS_OK;
1760 
1761 	file_struct->top =
1762 	   yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
1763 
1764 	/* Now we have a tree with all the non-zero branches NULL but
1765 	 * the height is the same as it was.
1766 	 * Let's see if we can trim internal tnodes to shorten the tree.
1767 	 * We can do this if only the 0th element in the tnode is in use
1768 	 * (ie all the non-zero are NULL)
1769 	 */
1770 
1771 	while (file_struct->top_level && !done) {
1772 		tn = file_struct->top;
1773 
1774 		has_data = 0;
1775 		for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
1776 			if (tn->internal[i])
1777 				has_data++;
1778 		}
1779 
1780 		if (!has_data) {
1781 			file_struct->top = tn->internal[0];
1782 			file_struct->top_level--;
1783 			yaffs_free_tnode(dev, tn);
1784 		} else {
1785 			done = 1;
1786 		}
1787 	}
1788 
1789 	return YAFFS_OK;
1790 }
1791 
1792 /*-------------------- End of File Structure functions.-------------------*/
1793 
1794 /* alloc_empty_obj gets us a clean Object.*/
1795 static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
1796 {
1797 	struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
1798 
1799 	if (!obj)
1800 		return obj;
1801 
1802 	dev->n_obj++;
1803 
1804 	/* Now sweeten it up... */
1805 
1806 	memset(obj, 0, sizeof(struct yaffs_obj));
1807 	obj->being_created = 1;
1808 
1809 	obj->my_dev = dev;
1810 	obj->hdr_chunk = 0;
1811 	obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
1812 	INIT_LIST_HEAD(&(obj->hard_links));
1813 	INIT_LIST_HEAD(&(obj->hash_link));
1814 	INIT_LIST_HEAD(&obj->siblings);
1815 
1816 	/* Now make the directory sane */
1817 	if (dev->root_dir) {
1818 		obj->parent = dev->root_dir;
1819 		list_add(&(obj->siblings),
1820 			 &dev->root_dir->variant.dir_variant.children);
1821 	}
1822 
1823 	/* Add it to the lost and found directory.
1824 	 * NB Can't put root or lost-n-found in lost-n-found so
1825 	 * check if lost-n-found exists first
1826 	 */
1827 	if (dev->lost_n_found)
1828 		yaffs_add_obj_to_dir(dev->lost_n_found, obj);
1829 
1830 	obj->being_created = 0;
1831 
1832 	dev->checkpoint_blocks_required = 0;	/* force recalculation */
1833 
1834 	return obj;
1835 }
1836 
1837 static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
1838 {
1839 	int i;
1840 	int l = 999;
1841 	int lowest = 999999;
1842 
1843 	/* Search for the shortest list or one that
1844 	 * isn't too long.
1845 	 */
1846 
1847 	for (i = 0; i < 10 && lowest > 4; i++) {
1848 		dev->bucket_finder++;
1849 		dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
1850 		if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
1851 			lowest = dev->obj_bucket[dev->bucket_finder].count;
1852 			l = dev->bucket_finder;
1853 		}
1854 	}
1855 
1856 	return l;
1857 }
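/*
 * Note (added comment): this probes at most 10 buckets and stops early as
 * soon as it finds one holding 4 or fewer objects, trading the true minimum
 * for a bounded search.
 */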
1858 
1859 static int yaffs_new_obj_id(struct yaffs_dev *dev)
1860 {
1861 	int bucket = yaffs_find_nice_bucket(dev);
1862 	int found = 0;
1863 	struct list_head *i;
1864 	u32 n = (u32) bucket;
1865 
1866 	/* Now find an object value that has not already been taken
1867 	 * by scanning the list.
1868 	 */
1869 
1870 	while (!found) {
1871 		found = 1;
1872 		n += YAFFS_NOBJECT_BUCKETS;
1873 		if (1 || dev->obj_bucket[bucket].count > 0) {
1874 			list_for_each(i, &dev->obj_bucket[bucket].list) {
1875 				/* If there is already one in the list */
1876 				if (i && list_entry(i, struct yaffs_obj,
1877 						    hash_link)->obj_id == n) {
1878 					found = 0;
1879 				}
1880 			}
1881 		}
1882 	}
1883 	return n;
1884 }
1885 
1886 static void yaffs_hash_obj(struct yaffs_obj *in)
1887 {
1888 	int bucket = yaffs_hash_fn(in->obj_id);
1889 	struct yaffs_dev *dev = in->my_dev;
1890 
1891 	list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
1892 	dev->obj_bucket[bucket].count++;
1893 }
1894 
1895 struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
1896 {
1897 	int bucket = yaffs_hash_fn(number);
1898 	struct list_head *i;
1899 	struct yaffs_obj *in;
1900 
1901 	list_for_each(i, &dev->obj_bucket[bucket].list) {
1902 		/* Look if it is in the list */
1903 		in = list_entry(i, struct yaffs_obj, hash_link);
1904 		if (in->obj_id == number) {
1905 			/* Don't return it if it is awaiting a deferred free */
1906 			if (in->defered_free)
1907 				return NULL;
1908 			return in;
1909 		}
1910 	}
1911 
1912 	return NULL;
1913 }
1914 
1915 static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
1916 				enum yaffs_obj_type type)
1917 {
1918 	struct yaffs_obj *the_obj = NULL;
1919 	struct yaffs_tnode *tn = NULL;
1920 
1921 	if (number < 0)
1922 		number = yaffs_new_obj_id(dev);
1923 
1924 	if (type == YAFFS_OBJECT_TYPE_FILE) {
1925 		tn = yaffs_get_tnode(dev);
1926 		if (!tn)
1927 			return NULL;
1928 	}
1929 
1930 	the_obj = yaffs_alloc_empty_obj(dev);
1931 	if (!the_obj) {
1932 		if (tn)
1933 			yaffs_free_tnode(dev, tn);
1934 		return NULL;
1935 	}
1936 
1937 	the_obj->fake = 0;
1938 	the_obj->rename_allowed = 1;
1939 	the_obj->unlink_allowed = 1;
1940 	the_obj->obj_id = number;
1941 	yaffs_hash_obj(the_obj);
1942 	the_obj->variant_type = type;
1943 	yaffs_load_current_time(the_obj, 1, 1);
1944 
1945 	switch (type) {
1946 	case YAFFS_OBJECT_TYPE_FILE:
1947 		the_obj->variant.file_variant.file_size = 0;
1948 		the_obj->variant.file_variant.scanned_size = 0;
1949 		the_obj->variant.file_variant.shrink_size =
1950 						yaffs_max_file_size(dev);
1951 		the_obj->variant.file_variant.top_level = 0;
1952 		the_obj->variant.file_variant.top = tn;
1953 		break;
1954 	case YAFFS_OBJECT_TYPE_DIRECTORY:
1955 		INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
1956 		INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
1957 		break;
1958 	case YAFFS_OBJECT_TYPE_SYMLINK:
1959 	case YAFFS_OBJECT_TYPE_HARDLINK:
1960 	case YAFFS_OBJECT_TYPE_SPECIAL:
1961 		/* No action required */
1962 		break;
1963 	case YAFFS_OBJECT_TYPE_UNKNOWN:
1964 		/* todo this should not happen */
1965 		break;
1966 	}
1967 	return the_obj;
1968 }
1969 
1970 static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
1971 					       int number, u32 mode)
1972 {
1973 
1974 	struct yaffs_obj *obj =
1975 	    yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
1976 
1977 	if (!obj)
1978 		return NULL;
1979 
1980 	obj->fake = 1;	/* it is fake so it might not use NAND */
1981 	obj->rename_allowed = 0;
1982 	obj->unlink_allowed = 0;
1983 	obj->deleted = 0;
1984 	obj->unlinked = 0;
1985 	obj->yst_mode = mode;
1986 	obj->my_dev = dev;
1987 	obj->hdr_chunk = 0;	/* Not a valid chunk. */
1988 	return obj;
1989 
1990 }
1991 
1992 
1993 static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
1994 {
1995 	int i;
1996 
1997 	dev->n_obj = 0;
1998 	dev->n_tnodes = 0;
1999 	yaffs_init_raw_tnodes_and_objs(dev);
2000 
2001 	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
2002 		INIT_LIST_HEAD(&dev->obj_bucket[i].list);
2003 		dev->obj_bucket[i].count = 0;
2004 	}
2005 }
2006 
2007 struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
2008 						 int number,
2009 						 enum yaffs_obj_type type)
2010 {
2011 	struct yaffs_obj *the_obj = NULL;
2012 
2013 	if (number > 0)
2014 		the_obj = yaffs_find_by_number(dev, number);
2015 
2016 	if (!the_obj)
2017 		the_obj = yaffs_new_obj(dev, number, type);
2018 
2019 	return the_obj;
2020 
2021 }
2022 
2023 YCHAR *yaffs_clone_str(const YCHAR *str)
2024 {
2025 	YCHAR *new_str = NULL;
2026 	int len;
2027 
2028 	if (!str)
2029 		str = _Y("");
2030 
2031 	len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
2032 	new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
2033 	if (new_str) {
2034 		strncpy(new_str, str, len);
2035 		new_str[len] = 0;
2036 	}
2037 	return new_str;
2038 
2039 }
2040 /*
2041  * yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
2042  * link (ie. name) is created or deleted in the directory.
2043  *
2044  * ie.
2045  *   create dir/a : update dir's mtime/ctime
2046  *   rm dir/a:   update dir's mtime/ctime
2047  *   modify dir/a: don't update dir's mtime/ctime
2048  *
2049  * This can be handled immediately or deferred. Deferring helps reduce the number
2050  * of updates when many files in a directory are changed within a brief period.
2051  *
2052  * If the directory updating is deferred then yaffs_update_dirty_dirs() must be
2053  * called periodically.
2054  */
2055 
2056 static void yaffs_update_parent(struct yaffs_obj *obj)
2057 {
2058 	struct yaffs_dev *dev;
2059 
2060 	if (!obj)
2061 		return;
2062 	dev = obj->my_dev;
2063 	obj->dirty = 1;
2064 	yaffs_load_current_time(obj, 0, 1);
2065 	if (dev->param.defered_dir_update) {
2066 		struct list_head *link = &obj->variant.dir_variant.dirty;
2067 
2068 		if (list_empty(link)) {
2069 			list_add(link, &dev->dirty_dirs);
2070 			yaffs_trace(YAFFS_TRACE_BACKGROUND,
2071 			  "Added object %d to dirty directories",
2072 			   obj->obj_id);
2073 		}
2074 
2075 	} else {
2076 		yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2077 	}
2078 }
2079 
2080 void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
2081 {
2082 	struct list_head *link;
2083 	struct yaffs_obj *obj;
2084 	struct yaffs_dir_var *d_s;
2085 	union yaffs_obj_var *o_v;
2086 
2087 	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
2088 
2089 	while (!list_empty(&dev->dirty_dirs)) {
2090 		link = dev->dirty_dirs.next;
2091 		list_del_init(link);
2092 
2093 		d_s = list_entry(link, struct yaffs_dir_var, dirty);
2094 		o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
2095 		obj = list_entry(o_v, struct yaffs_obj, variant);
2096 
2097 		yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
2098 			obj->obj_id);
2099 
2100 		if (obj->dirty)
2101 			yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2102 	}
2103 }
2104 
2105 /*
2106  * Mknod (create) a new object.
2107  * equiv_obj only has meaning for a hard link;
2108  * alias_str only has meaning for a symlink.
2109  * rdev only has meaning for devices (a subset of special objects)
2110  */
2111 
2112 static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
2113 					  struct yaffs_obj *parent,
2114 					  const YCHAR *name,
2115 					  u32 mode,
2116 					  u32 uid,
2117 					  u32 gid,
2118 					  struct yaffs_obj *equiv_obj,
2119 					  const YCHAR *alias_str, u32 rdev)
2120 {
2121 	struct yaffs_obj *in;
2122 	YCHAR *str = NULL;
2123 	struct yaffs_dev *dev = parent->my_dev;
2124 
2125 	/* Check if the entry exists.
2126 	 * If it does then fail the call since we don't want a dup. */
2127 	if (yaffs_find_by_name(parent, name))
2128 		return NULL;
2129 
2130 	if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
2131 		str = yaffs_clone_str(alias_str);
2132 		if (!str)
2133 			return NULL;
2134 	}
2135 
2136 	in = yaffs_new_obj(dev, -1, type);
2137 
2138 	if (!in) {
2139 		kfree(str);
2140 		return NULL;
2141 	}
2142 
2143 	in->hdr_chunk = 0;
2144 	in->valid = 1;
2145 	in->variant_type = type;
2146 
2147 	in->yst_mode = mode;
2148 
2149 	yaffs_attribs_init(in, gid, uid, rdev);
2150 
2151 	in->n_data_chunks = 0;
2152 
2153 	yaffs_set_obj_name(in, name);
2154 	in->dirty = 1;
2155 
2156 	yaffs_add_obj_to_dir(parent, in);
2157 
2158 	in->my_dev = parent->my_dev;
2159 
2160 	switch (type) {
2161 	case YAFFS_OBJECT_TYPE_SYMLINK:
2162 		in->variant.symlink_variant.alias = str;
2163 		break;
2164 	case YAFFS_OBJECT_TYPE_HARDLINK:
2165 		in->variant.hardlink_variant.equiv_obj = equiv_obj;
2166 		in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
2167 		list_add(&in->hard_links, &equiv_obj->hard_links);
2168 		break;
2169 	case YAFFS_OBJECT_TYPE_FILE:
2170 	case YAFFS_OBJECT_TYPE_DIRECTORY:
2171 	case YAFFS_OBJECT_TYPE_SPECIAL:
2172 	case YAFFS_OBJECT_TYPE_UNKNOWN:
2173 		/* do nothing */
2174 		break;
2175 	}
2176 
2177 	if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
2178 		/* Could not create the object header, fail */
2179 		yaffs_del_obj(in);
2180 		in = NULL;
2181 	}
2182 
2183 	if (in)
2184 		yaffs_update_parent(parent);
2185 
2186 	return in;
2187 }
2188 
2189 struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
2190 				    const YCHAR *name, u32 mode, u32 uid,
2191 				    u32 gid)
2192 {
2193 	return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
2194 				uid, gid, NULL, NULL, 0);
2195 }
2196 
2197 struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
2198 				   u32 mode, u32 uid, u32 gid)
2199 {
2200 	return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
2201 				mode, uid, gid, NULL, NULL, 0);
2202 }
2203 
2204 struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
2205 				       const YCHAR *name, u32 mode, u32 uid,
2206 				       u32 gid, u32 rdev)
2207 {
2208 	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
2209 				uid, gid, NULL, NULL, rdev);
2210 }
2211 
2212 struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
2213 				       const YCHAR *name, u32 mode, u32 uid,
2214 				       u32 gid, const YCHAR *alias)
2215 {
2216 	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
2217 				uid, gid, NULL, alias, 0);
2218 }
2219 
2220 /* yaffs_link_obj returns the object id of the equivalent object.*/
2221 struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
2222 				 struct yaffs_obj *equiv_obj)
2223 {
2224 	/* Get the real object in case we were fed a hard link obj */
2225 	equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
2226 
2227 	if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
2228 			parent, name, 0, 0, 0,
2229 			equiv_obj, NULL, 0))
2230 		return equiv_obj;
2231 
2232 	return NULL;
2233 
2234 }
2235 
2236 
2237 
2238 /*---------------------- Block Management and Page Allocation -------------*/
2239 
2240 static void yaffs_deinit_blocks(struct yaffs_dev *dev)
2241 {
2242 	if (dev->block_info_alt && dev->block_info)
2243 		vfree(dev->block_info);
2244 	else
2245 		kfree(dev->block_info);
2246 
2247 	dev->block_info_alt = 0;
2248 
2249 	dev->block_info = NULL;
2250 
2251 	if (dev->chunk_bits_alt && dev->chunk_bits)
2252 		vfree(dev->chunk_bits);
2253 	else
2254 		kfree(dev->chunk_bits);
2255 	dev->chunk_bits_alt = 0;
2256 	dev->chunk_bits = NULL;
2257 }
2258 
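/* Allocate the block info array and the chunk-in-use bitmap.
 * kmalloc is tried first; if that fails we fall back to vmalloc, and the
 * block_info_alt/chunk_bits_alt flags remember which allocator was used
 * so that yaffs_deinit_blocks() frees with the matching call.
 */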
2259 static int yaffs_init_blocks(struct yaffs_dev *dev)
2260 {
2261 	int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
2262 
2263 	dev->block_info = NULL;
2264 	dev->chunk_bits = NULL;
2265 	dev->alloc_block = -1;	/* force it to get a new one */
2266 
2267 	/* If the first allocation strategy fails, try the alternate one */
2268 	dev->block_info =
2269 		kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
2270 	if (!dev->block_info) {
2271 		dev->block_info =
2272 		    vmalloc(n_blocks * sizeof(struct yaffs_block_info));
2273 		dev->block_info_alt = 1;
2274 	} else {
2275 		dev->block_info_alt = 0;
2276 	}
2277 
2278 	if (!dev->block_info)
2279 		goto alloc_error;
2280 
2281 	/* Set up dynamic blockinfo stuff. Round up bytes. */
2282 	dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
2283 	dev->chunk_bits =
2284 		kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
2285 	if (!dev->chunk_bits) {
2286 		dev->chunk_bits =
2287 		    vmalloc(dev->chunk_bit_stride * n_blocks);
2288 		dev->chunk_bits_alt = 1;
2289 	} else {
2290 		dev->chunk_bits_alt = 0;
2291 	}
2292 	if (!dev->chunk_bits)
2293 		goto alloc_error;
2294 
2295 
2296 	memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
2297 	memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
2298 	return YAFFS_OK;
2299 
2300 alloc_error:
2301 	yaffs_deinit_blocks(dev);
2302 	return YAFFS_FAIL;
2303 }
2304 
2305 
2306 void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
2307 {
2308 	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
2309 	int erased_ok = 0;
2310 	int i;
2311 
2312 	/* If the block is still healthy erase it and mark as clean.
2313 	 * If the block has had a data failure, then retire it.
2314 	 */
2315 
2316 	yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
2317 		"yaffs_block_became_dirty block %d state %d %s",
2318 		block_no, bi->block_state,
2319 		(bi->needs_retiring) ? "needs retiring" : "");
2320 
2321 	yaffs2_clear_oldest_dirty_seq(dev, bi);
2322 
2323 	bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
2324 
2325 	/* If this is the block being garbage collected then stop gc'ing */
2326 	if (block_no == dev->gc_block)
2327 		dev->gc_block = 0;
2328 
2329 	/* If this block is currently the best candidate for gc
2330 	 * then drop it as a candidate */
2331 	if (block_no == dev->gc_dirtiest) {
2332 		dev->gc_dirtiest = 0;
2333 		dev->gc_pages_in_use = 0;
2334 	}
2335 
2336 	if (!bi->needs_retiring) {
2337 		yaffs2_checkpt_invalidate(dev);
2338 		erased_ok = yaffs_erase_block(dev, block_no);
2339 		if (!erased_ok) {
2340 			dev->n_erase_failures++;
2341 			yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2342 			  "**>> Erasure failed %d", block_no);
2343 		}
2344 	}
2345 
2346 	/* Verify erasure if needed */
2347 	if (erased_ok &&
2348 	    ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
2349 	     !yaffs_skip_verification(dev))) {
2350 		for (i = 0; i < dev->param.chunks_per_block; i++) {
2351 			if (!yaffs_check_chunk_erased(dev,
2352 				block_no * dev->param.chunks_per_block + i)) {
2353 				yaffs_trace(YAFFS_TRACE_ERROR,
2354 					">>Block %d erasure supposedly OK, but chunk %d not erased",
2355 					block_no, i);
2356 			}
2357 		}
2358 	}
2359 
2360 	if (!erased_ok) {
2361 		/* We lost a block of free space */
2362 		dev->n_free_chunks -= dev->param.chunks_per_block;
2363 		yaffs_retire_block(dev, block_no);
2364 		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2365 			"**>> Block %d retired", block_no);
2366 		return;
2367 	}
2368 
2369 	/* Clean it up... */
2370 	bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
2371 	bi->seq_number = 0;
2372 	dev->n_erased_blocks++;
2373 	bi->pages_in_use = 0;
2374 	bi->soft_del_pages = 0;
2375 	bi->has_shrink_hdr = 0;
2376 	bi->skip_erased_check = 1;	/* Clean, so no need to check */
2377 	bi->gc_prioritise = 0;
2378 	bi->has_summary = 0;
2379 
2380 	yaffs_clear_chunk_bits(dev, block_no);
2381 
2382 	yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
2383 }
2384 
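/* Garbage collect a single chunk.
 * The chunk and its tags are read back and the owning object looked up.
 * A data chunk of a soft deleted file is simply dropped (and the object
 * queued for final clean up once its last chunk goes); a live data chunk
 * or object header is rewritten to a new location and the tnode tree or
 * hdr_chunk pointer updated. The old chunk is then deleted.
 */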
2385 static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
2386 					struct yaffs_block_info *bi,
2387 					int old_chunk, u8 *buffer)
2388 {
2389 	int new_chunk;
2390 	int mark_flash = 1;
2391 	struct yaffs_ext_tags tags;
2392 	struct yaffs_obj *object;
2393 	int matching_chunk;
2394 	int ret_val = YAFFS_OK;
2395 
2396 	memset(&tags, 0, sizeof(tags));
2397 	yaffs_rd_chunk_tags_nand(dev, old_chunk,
2398 				 buffer, &tags);
2399 	object = yaffs_find_by_number(dev, tags.obj_id);
2400 
2401 	yaffs_trace(YAFFS_TRACE_GC_DETAIL,
2402 		"Collecting chunk in block %d, %d %d %d ",
2403 		dev->gc_chunk, tags.obj_id,
2404 		tags.chunk_id, tags.n_bytes);
2405 
2406 	if (object && !yaffs_skip_verification(dev)) {
2407 		if (tags.chunk_id == 0)
2408 			matching_chunk =
2409 			    object->hdr_chunk;
2410 		else if (object->soft_del)
2411 			/* Defeat the test */
2412 			matching_chunk = old_chunk;
2413 		else
2414 			matching_chunk =
2415 			    yaffs_find_chunk_in_file
2416 			    (object, tags.chunk_id,
2417 			     NULL);
2418 
2419 		if (old_chunk != matching_chunk)
2420 			yaffs_trace(YAFFS_TRACE_ERROR,
2421 				"gc: page in gc mismatch: %d %d %d %d",
2422 				old_chunk,
2423 				matching_chunk,
2424 				tags.obj_id,
2425 				tags.chunk_id);
2426 	}
2427 
2428 	if (!object) {
2429 		yaffs_trace(YAFFS_TRACE_ERROR,
2430 			"page %d in gc has no object: %d %d %d ",
2431 			old_chunk,
2432 			tags.obj_id, tags.chunk_id,
2433 			tags.n_bytes);
2434 	}
2435 
2436 	if (object &&
2437 	    object->deleted &&
2438 	    object->soft_del && tags.chunk_id != 0) {
2439 		/* Data chunk in a soft deleted file:
2440 		 * throw it away.
2441 		 * It's a soft deleted data chunk, so
2442 		 * no need to copy this, just forget
2443 		 * about it and fix up the object.
2444 		 */
2445 
2446 		/* Free chunks already includes
2447 		 * soft deleted chunks, however this
2448 		 * chunk is soon going to be really
2449 		 * deleted which will increment free
2450 		 * chunks. We have to decrement free
2451 		 * chunks so this works out properly.
2452 		 */
2453 		dev->n_free_chunks--;
2454 		bi->soft_del_pages--;
2455 
2456 		object->n_data_chunks--;
2457 		if (object->n_data_chunks <= 0) {
2458 			/* remember to clean up obj */
2459 			dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
2460 			dev->n_clean_ups++;
2461 		}
2462 		mark_flash = 0;
2463 	} else if (object) {
2464 		/* It's either a data chunk in a live
2465 		 * file or an ObjectHeader, so we're
2466 		 * interested in it.
2467 		 * NB Need to keep the ObjectHeaders of
2468 		 * deleted files until the whole file
2469 		 * has been deleted.
2470 		 */
2471 		tags.serial_number++;
2472 		dev->n_gc_copies++;
2473 
2474 		if (tags.chunk_id == 0) {
2475 			/* It is an object header,
2476 			 * so we need to nuke the
2477 			 * shrink header flags since their
2478 			 * work is done.
2479 			 * Also need to clean up
2480 			 * shadowing.
2481 			 */
2482 			struct yaffs_obj_hdr *oh;
2483 			oh = (struct yaffs_obj_hdr *) buffer;
2484 
2485 			oh->is_shrink = 0;
2486 			tags.extra_is_shrink = 0;
2487 			oh->shadows_obj = 0;
2488 			oh->inband_shadowed_obj_id = 0;
2489 			tags.extra_shadows = 0;
2490 
2491 			/* Update file size */
2492 			if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
2493 				yaffs_oh_size_load(oh,
2494 				    object->variant.file_variant.file_size);
2495 				tags.extra_file_size =
2496 				    object->variant.file_variant.file_size;
2497 			}
2498 
2499 			yaffs_verify_oh(object, oh, &tags, 1);
2500 			new_chunk =
2501 			    yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
2502 		} else {
2503 			new_chunk =
2504 			    yaffs_write_new_chunk(dev, buffer, &tags, 1);
2505 		}
2506 
2507 		if (new_chunk < 0) {
2508 			ret_val = YAFFS_FAIL;
2509 		} else {
2510 
2511 			/* Now fix up the Tnodes etc. */
2512 
2513 			if (tags.chunk_id == 0) {
2514 				/* It's a header */
2515 				object->hdr_chunk = new_chunk;
2516 				object->serial = tags.serial_number;
2517 			} else {
2518 				/* It's a data chunk */
2519 				yaffs_put_chunk_in_file(object, tags.chunk_id,
2520 							new_chunk, 0);
2521 			}
2522 		}
2523 	}
2524 	if (ret_val == YAFFS_OK)
2525 		yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
2526 	return ret_val;
2527 }
2528 
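/* Garbage collect one block.
 * Unless whole_block is set, at most a few chunks (max_copies) are moved
 * per call, so collecting a block may be spread over several calls with
 * progress kept in dev->gc_chunk. When collection of the block completes,
 * any objects whose last data chunks were discarded here are finally
 * deleted.
 */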
2529 static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
2530 {
2531 	int old_chunk;
2532 	int ret_val = YAFFS_OK;
2533 	int i;
2534 	int is_checkpt_block;
2535 	int max_copies;
2536 	int chunks_before = yaffs_get_erased_chunks(dev);
2537 	int chunks_after;
2538 	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
2539 
2540 	is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
2541 
2542 	yaffs_trace(YAFFS_TRACE_TRACING,
2543 		"Collecting block %d, in use %d, shrink %d, whole_block %d",
2544 		block, bi->pages_in_use, bi->has_shrink_hdr,
2545 		whole_block);
2546 
2547 	/*yaffs_verify_free_chunks(dev); */
2548 
2549 	if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
2550 		bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
2551 
2552 	bi->has_shrink_hdr = 0;	/* clear the flag so that the block can be erased */
2553 
2554 	dev->gc_disable = 1;
2555 
2556 	yaffs_summary_gc(dev, block);
2557 
2558 	if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
2559 		yaffs_trace(YAFFS_TRACE_TRACING,
2560 			"Collecting block %d that has no chunks in use",
2561 			block);
2562 		yaffs_block_became_dirty(dev, block);
2563 	} else {
2564 
2565 		u8 *buffer = yaffs_get_temp_buffer(dev);
2566 
2567 		yaffs_verify_blk(dev, bi, block);
2568 
2569 		max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
2570 		old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
2571 
2572 		for (/* init already done */ ;
2573 		     ret_val == YAFFS_OK &&
2574 		     dev->gc_chunk < dev->param.chunks_per_block &&
2575 		     (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
2576 		     max_copies > 0;
2577 		     dev->gc_chunk++, old_chunk++) {
2578 			if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
2579 				/* Page is in use and might need to be copied */
2580 				max_copies--;
2581 				ret_val = yaffs_gc_process_chunk(dev, bi,
2582 							old_chunk, buffer);
2583 			}
2584 		}
2585 		yaffs_release_temp_buffer(dev, buffer);
2586 	}
2587 
2588 	yaffs_verify_collected_blk(dev, bi, block);
2589 
2590 	if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2591 		/*
2592 		 * The gc did not complete. Set block state back to FULL
2593 		 * because checkpointing does not restore gc.
2594 		 */
2595 		bi->block_state = YAFFS_BLOCK_STATE_FULL;
2596 	} else {
2597 		/* The gc completed. */
2598 		/* Do any required cleanups */
2599 		for (i = 0; i < dev->n_clean_ups; i++) {
2600 			/* Time to delete the file too */
2601 			struct yaffs_obj *object =
2602 			    yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
2603 			if (object) {
2604 				yaffs_free_tnode(dev,
2605 					  object->variant.file_variant.top);
2606 				object->variant.file_variant.top = NULL;
2607 				yaffs_trace(YAFFS_TRACE_GC,
2608 					"yaffs: About to finally delete object %d",
2609 					object->obj_id);
2610 				yaffs_generic_obj_del(object);
2611 				object->my_dev->n_deleted_files--;
2612 			}
2613 
2614 		}
2615 		chunks_after = yaffs_get_erased_chunks(dev);
2616 		if (chunks_before >= chunks_after)
2617 			yaffs_trace(YAFFS_TRACE_GC,
2618 				"gc did not increase free chunks before %d after %d",
2619 				chunks_before, chunks_after);
2620 		dev->gc_block = 0;
2621 		dev->gc_chunk = 0;
2622 		dev->n_clean_ups = 0;
2623 	}
2624 
2625 	dev->gc_disable = 0;
2626 
2627 	return ret_val;
2628 }
2629 
2630 /*
2631  * find_gc_block() selects the dirtiest block (or close enough)
2632  * for garbage collection.
2633  */
2634 
2635 static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
2636 				    int aggressive, int background)
2637 {
2638 	int i;
2639 	int iterations;
2640 	unsigned selected = 0;
2641 	int prioritised = 0;
2642 	int prioritised_exist = 0;
2643 	struct yaffs_block_info *bi;
2644 	int threshold;
2645 
2646 	/* First let's see if we need to grab a prioritised block */
2647 	if (dev->has_pending_prioritised_gc && !aggressive) {
2648 		dev->gc_dirtiest = 0;
2649 		bi = dev->block_info;
2650 		for (i = dev->internal_start_block;
2651 		     i <= dev->internal_end_block && !selected; i++) {
2652 
2653 			if (bi->gc_prioritise) {
2654 				prioritised_exist = 1;
2655 				if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2656 				    yaffs_block_ok_for_gc(dev, bi)) {
2657 					selected = i;
2658 					prioritised = 1;
2659 				}
2660 			}
2661 			bi++;
2662 		}
2663 
2664 		/*
2665 		 * If there is a prioritised block and none was selected then
2666 		 * this happened because there is at least one old dirty block
2667 		 * gumming up the works. Let's gc the oldest dirty block.
2668 		 */
2669 
2670 		if (prioritised_exist &&
2671 		    !selected && dev->oldest_dirty_block > 0)
2672 			selected = dev->oldest_dirty_block;
2673 
2674 		if (!prioritised_exist)	/* None found, so we can clear this */
2675 			dev->has_pending_prioritised_gc = 0;
2676 	}
2677 
2678 	/* If we're doing aggressive GC then we are happy to take a less-dirty
2679 	 * block, and search harder.
2680 	 * else (leisurely gc), we only bother to do this if the
2681 	 * block has only a few pages in use.
2682 	 */
2683 
2684 	if (!selected) {
2685 		int pages_used;
2686 		int n_blocks =
2687 		    dev->internal_end_block - dev->internal_start_block + 1;
2688 		if (aggressive) {
2689 			threshold = dev->param.chunks_per_block;
2690 			iterations = n_blocks;
2691 		} else {
2692 			int max_threshold;
2693 
2694 			if (background)
2695 				max_threshold = dev->param.chunks_per_block / 2;
2696 			else
2697 				max_threshold = dev->param.chunks_per_block / 8;
2698 
2699 			if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2700 				max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2701 
2702 			threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
2703 			if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2704 				threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2705 			if (threshold > max_threshold)
2706 				threshold = max_threshold;
2707 
2708 			iterations = n_blocks / 16 + 1;
2709 			if (iterations > 100)
2710 				iterations = 100;
2711 		}
2712 
2713 		for (i = 0;
2714 		     i < iterations &&
2715 		     (dev->gc_dirtiest < 1 ||
2716 		      dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
2717 		     i++) {
2718 			dev->gc_block_finder++;
2719 			if (dev->gc_block_finder < dev->internal_start_block ||
2720 			    dev->gc_block_finder > dev->internal_end_block)
2721 				dev->gc_block_finder =
2722 				    dev->internal_start_block;
2723 
2724 			bi = yaffs_get_block_info(dev, dev->gc_block_finder);
2725 
2726 			pages_used = bi->pages_in_use - bi->soft_del_pages;
2727 
2728 			if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2729 			    pages_used < dev->param.chunks_per_block &&
2730 			    (dev->gc_dirtiest < 1 ||
2731 			     pages_used < dev->gc_pages_in_use) &&
2732 			    yaffs_block_ok_for_gc(dev, bi)) {
2733 				dev->gc_dirtiest = dev->gc_block_finder;
2734 				dev->gc_pages_in_use = pages_used;
2735 			}
2736 		}
2737 
2738 		if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
2739 			selected = dev->gc_dirtiest;
2740 	}
2741 
2742 	/*
2743 	 * If nothing has been selected for a while, try the oldest dirty
2744 	 * because that's gumming up the works.
2745 	 */
2746 
2747 	if (!selected && dev->param.is_yaffs2 &&
2748 	    dev->gc_not_done >= (background ? 10 : 20)) {
2749 		yaffs2_find_oldest_dirty_seq(dev);
2750 		if (dev->oldest_dirty_block > 0) {
2751 			selected = dev->oldest_dirty_block;
2752 			dev->gc_dirtiest = selected;
2753 			dev->oldest_dirty_gc_count++;
2754 			bi = yaffs_get_block_info(dev, selected);
2755 			dev->gc_pages_in_use =
2756 			    bi->pages_in_use - bi->soft_del_pages;
2757 		} else {
2758 			dev->gc_not_done = 0;
2759 		}
2760 	}
2761 
2762 	if (selected) {
2763 		yaffs_trace(YAFFS_TRACE_GC,
2764 			"GC Selected block %d with %d free, prioritised:%d",
2765 			selected,
2766 			dev->param.chunks_per_block - dev->gc_pages_in_use,
2767 			prioritised);
2768 
2769 		dev->n_gc_blocks++;
2770 		if (background)
2771 			dev->bg_gcs++;
2772 
2773 		dev->gc_dirtiest = 0;
2774 		dev->gc_pages_in_use = 0;
2775 		dev->gc_not_done = 0;
2776 		if (dev->refresh_skip > 0)
2777 			dev->refresh_skip--;
2778 	} else {
2779 		dev->gc_not_done++;
2780 		yaffs_trace(YAFFS_TRACE_GC,
2781 			"GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
2782 			dev->gc_block_finder, dev->gc_not_done, threshold,
2783 			dev->gc_dirtiest, dev->gc_pages_in_use,
2784 			dev->oldest_dirty_block, background ? " bg" : "");
2785 	}
2786 
2787 	return selected;
2788 }
2789 
2790 /* New garbage collector
2791  * If we're very low on erased blocks then we do aggressive garbage collection
2792  * otherwise we do "leisurely" garbage collection.
2793  * Aggressive gc looks further (the whole array) and will accept less dirty blocks.
2794  * Passive gc only inspects smaller areas and only accepts dirtier blocks.
2795  *
2796  * The idea is to help clear out space in a more spread-out manner.
2797  * Dunno if it really does anything useful.
2798  */
2799 static int yaffs_check_gc(struct yaffs_dev *dev, int background)
2800 {
2801 	int aggressive = 0;
2802 	int gc_ok = YAFFS_OK;
2803 	int max_tries = 0;
2804 	int min_erased;
2805 	int erased_chunks;
2806 	int checkpt_block_adjust;
2807 
2808 	if (dev->param.gc_control_fn &&
2809 		(dev->param.gc_control_fn(dev) & 1) == 0)
2810 		return YAFFS_OK;
2811 
2812 	if (dev->gc_disable)
2813 		/* Bail out so we don't get recursive gc */
2814 		return YAFFS_OK;
2815 
2816 	/* This loop should pass the first time.
2817 	 * Only loops here if the collection does not increase space.
2818 	 */
2819 
2820 	do {
2821 		max_tries++;
2822 
2823 		checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
2824 
2825 		min_erased =
2826 		    dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
2827 		erased_chunks =
2828 		    dev->n_erased_blocks * dev->param.chunks_per_block;
2829 
2830 		/* If we need a block soon then do aggressive gc. */
2831 		if (dev->n_erased_blocks < min_erased)
2832 			aggressive = 1;
2833 		else {
2834 			if (!background
2835 			    && erased_chunks > (dev->n_free_chunks / 4))
2836 				break;
2837 
2838 			if (dev->gc_skip > 20)
2839 				dev->gc_skip = 20;
2840 			if (erased_chunks < dev->n_free_chunks / 2 ||
2841 			    dev->gc_skip < 1 || background)
2842 				aggressive = 0;
2843 			else {
2844 				dev->gc_skip--;
2845 				break;
2846 			}
2847 		}
2848 
2849 		dev->gc_skip = 5;
2850 
2851 		/* If we don't already have a block being gc'd then see if we
2852 		 * should start another */
2853 
2854 		if (dev->gc_block < 1 && !aggressive) {
2855 			dev->gc_block = yaffs2_find_refresh_block(dev);
2856 			dev->gc_chunk = 0;
2857 			dev->n_clean_ups = 0;
2858 		}
2859 		if (dev->gc_block < 1) {
2860 			dev->gc_block =
2861 			    yaffs_find_gc_block(dev, aggressive, background);
2862 			dev->gc_chunk = 0;
2863 			dev->n_clean_ups = 0;
2864 		}
2865 
2866 		if (dev->gc_block > 0) {
2867 			dev->all_gcs++;
2868 			if (!aggressive)
2869 				dev->passive_gc_count++;
2870 
2871 			yaffs_trace(YAFFS_TRACE_GC,
2872 				"yaffs: GC n_erased_blocks %d aggressive %d",
2873 				dev->n_erased_blocks, aggressive);
2874 
2875 			gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
2876 		}
2877 
2878 		if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
2879 		    dev->gc_block > 0) {
2880 			yaffs_trace(YAFFS_TRACE_GC,
2881 				"yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
2882 				dev->n_erased_blocks, max_tries,
2883 				dev->gc_block);
2884 		}
2885 	} while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
2886 		 (dev->gc_block > 0) && (max_tries < 2));
2887 
2888 	return aggressive ? gc_ok : YAFFS_OK;
2889 }
2890 
2891 /*
2892  * yaffs_bg_gc()
2893  * Garbage collects. Intended to be called from a background thread.
2894  * Returns non-zero if more than half of the free chunks are erased.
2895  */
2896 int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
2897 {
2898 	int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
2899 
2900 	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
2901 
2902 	yaffs_check_gc(dev, 1);
2903 	return erased_chunks > dev->n_free_chunks / 2;
2904 }
2905 
2906 /*-------------------- Data file manipulation -----------------*/
2907 
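/* Read one chunk of a file's data.
 * The chunk is located through the file's tnode tree; reading a hole
 * (no chunk present) returns zeroed data rather than an error.
 */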
2908 static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
2909 {
2910 	int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
2911 
2912 	if (nand_chunk >= 0)
2913 		return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
2914 						buffer, NULL);
2915 	else {
2916 		yaffs_trace(YAFFS_TRACE_NANDACCESS,
2917 			"Chunk %d not found zero instead",
2918 			nand_chunk);
2919 		/* get sane (zero) data if you read a hole */
2920 		memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
2921 		return 0;
2922 	}
2923 
2924 }
2925 
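/* Delete a chunk.
 * For yaffs1 the deletion may be marked in NAND (mark_flash); yaffs2 does
 * not mark deletions in flash. The chunk bit is cleared, pages_in_use is
 * decremented and, if the block ends up with no pages in use (and no
 * shrink header), it is handed to yaffs_block_became_dirty() to be erased.
 */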
2926 void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
2927 		     int lyn)
2928 {
2929 	int block;
2930 	int page;
2931 	struct yaffs_ext_tags tags;
2932 	struct yaffs_block_info *bi;
2933 
2934 	if (chunk_id <= 0)
2935 		return;
2936 
2937 	dev->n_deletions++;
2938 	block = chunk_id / dev->param.chunks_per_block;
2939 	page = chunk_id % dev->param.chunks_per_block;
2940 
2941 	if (!yaffs_check_chunk_bit(dev, block, page))
2942 		yaffs_trace(YAFFS_TRACE_VERIFY,
2943 			"Deleting invalid chunk %d", chunk_id);
2944 
2945 	bi = yaffs_get_block_info(dev, block);
2946 
2947 	yaffs2_update_oldest_dirty_seq(dev, block, bi);
2948 
2949 	yaffs_trace(YAFFS_TRACE_DELETION,
2950 		"line %d delete of chunk %d",
2951 		lyn, chunk_id);
2952 
2953 	if (!dev->param.is_yaffs2 && mark_flash &&
2954 	    bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
2955 
2956 		memset(&tags, 0, sizeof(tags));
2957 		tags.is_deleted = 1;
2958 		yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
2959 		yaffs_handle_chunk_update(dev, chunk_id, &tags);
2960 	} else {
2961 		dev->n_unmarked_deletions++;
2962 	}
2963 
2964 	/* Pull out of the management area.
2965 	 * If the whole block became dirty, this will kick off an erasure.
2966 	 */
2967 	if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
2968 	    bi->block_state == YAFFS_BLOCK_STATE_FULL ||
2969 	    bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
2970 	    bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2971 		dev->n_free_chunks++;
2972 		yaffs_clear_chunk_bit(dev, block, page);
2973 		bi->pages_in_use--;
2974 
2975 		if (bi->pages_in_use == 0 &&
2976 		    !bi->has_shrink_hdr &&
2977 		    bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
2978 		    bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
2979 			yaffs_block_became_dirty(dev, block);
2980 		}
2981 	}
2982 }
2983 
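/* Write one chunk of a file's data.
 * A new chunk is written with a bumped serial number, the tnode tree is
 * patched to point at it, and the previous copy (if any) is deleted.
 */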
2984 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
2985 			     const u8 *buffer, int n_bytes, int use_reserve)
2986 {
2987 	/* Find the old chunk. Need to do this to get the serial number.
2988 	 * Write new one and patch into tree.
2989 	 * Invalidate old tags.
2990 	 */
2991 
2992 	int prev_chunk_id;
2993 	struct yaffs_ext_tags prev_tags;
2994 	int new_chunk_id;
2995 	struct yaffs_ext_tags new_tags;
2996 	struct yaffs_dev *dev = in->my_dev;
2997 
2998 	yaffs_check_gc(dev, 0);
2999 
3000 	/* Get the previous chunk at this location in the file if it exists.
3001 	 * If it does not exist then put a zero into the tree. This creates
3002 	 * the tnode now, rather than later when it is harder to clean up.
3003 	 */
3004 	prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
3005 	if (prev_chunk_id < 1 &&
3006 	    !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
3007 		return 0;
3008 
3009 	/* Set up new tags */
3010 	memset(&new_tags, 0, sizeof(new_tags));
3011 
3012 	new_tags.chunk_id = inode_chunk;
3013 	new_tags.obj_id = in->obj_id;
3014 	new_tags.serial_number =
3015 	    (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
3016 	new_tags.n_bytes = n_bytes;
3017 
3018 	if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
3019 		yaffs_trace(YAFFS_TRACE_ERROR,
3020 		  "Writing %d bytes to chunk!!!!!!!!!",
3021 		   n_bytes);
3022 		BUG();
3023 	}
3024 
3025 	new_chunk_id =
3026 	    yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
3027 
3028 	if (new_chunk_id > 0) {
3029 		yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
3030 
3031 		if (prev_chunk_id > 0)
3032 			yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3033 
3034 		yaffs_verify_file_sane(in);
3035 	}
3036 	return new_chunk_id;
3037 
3038 }
3039 
3040 
3041 
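/* Extended attribute (xattrib) handling.
 * xattribs live in the object header chunk, in the space after the
 * yaffs_obj_hdr, and are manipulated with the nval_* name/value helpers.
 * Modifications are applied while rewriting the object header: the xmod
 * descriptor is passed to yaffs_update_oh(), which calls
 * yaffs_apply_xattrib_mod() on the header buffer before it is written.
 */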
3042 static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
3043 				const YCHAR *name, const void *value, int size,
3044 				int flags)
3045 {
3046 	struct yaffs_xattr_mod xmod;
3047 	int result;
3048 
3049 	xmod.set = set;
3050 	xmod.name = name;
3051 	xmod.data = value;
3052 	xmod.size = size;
3053 	xmod.flags = flags;
3054 	xmod.result = -ENOSPC;
3055 
3056 	result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
3057 
3058 	if (result > 0)
3059 		return xmod.result;
3060 	else
3061 		return -ENOSPC;
3062 }
3063 
3064 static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
3065 				   struct yaffs_xattr_mod *xmod)
3066 {
3067 	int retval = 0;
3068 	int x_offs = sizeof(struct yaffs_obj_hdr);
3069 	struct yaffs_dev *dev = obj->my_dev;
3070 	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3071 	char *x_buffer = buffer + x_offs;
3072 
3073 	if (xmod->set)
3074 		retval =
3075 		    nval_set(x_buffer, x_size, xmod->name, xmod->data,
3076 			     xmod->size, xmod->flags);
3077 	else
3078 		retval = nval_del(x_buffer, x_size, xmod->name);
3079 
3080 	obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3081 	obj->xattr_known = 1;
3082 	xmod->result = retval;
3083 
3084 	return retval;
3085 }
3086 
3087 static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
3088 				  void *value, int size)
3089 {
3090 	char *buffer = NULL;
3091 	int result;
3092 	struct yaffs_ext_tags tags;
3093 	struct yaffs_dev *dev = obj->my_dev;
3094 	int x_offs = sizeof(struct yaffs_obj_hdr);
3095 	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3096 	char *x_buffer;
3097 	int retval = 0;
3098 
3099 	if (obj->hdr_chunk < 1)
3100 		return -ENODATA;
3101 
3102 	/* If we know that the object has no xattribs then don't do all the
3103 	 * reading and parsing.
3104 	 */
3105 	if (obj->xattr_known && !obj->has_xattr) {
3106 		if (name)
3107 			return -ENODATA;
3108 		else
3109 			return 0;
3110 	}
3111 
3112 	buffer = (char *)yaffs_get_temp_buffer(dev);
3113 	if (!buffer)
3114 		return -ENOMEM;
3115 
3116 	result =
3117 	    yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
3118 
3119 	if (result != YAFFS_OK)
3120 		retval = -ENOENT;
3121 	else {
3122 		x_buffer = buffer + x_offs;
3123 
3124 		if (!obj->xattr_known) {
3125 			obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3126 			obj->xattr_known = 1;
3127 		}
3128 
3129 		if (name)
3130 			retval = nval_get(x_buffer, x_size, name, value, size);
3131 		else
3132 			retval = nval_list(x_buffer, x_size, value, size);
3133 	}
3134 	yaffs_release_temp_buffer(dev, (u8 *) buffer);
3135 	return retval;
3136 }
3137 
3138 int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
3139 		      const void *value, int size, int flags)
3140 {
3141 	return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
3142 }
3143 
3144 int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
3145 {
3146 	return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
3147 }
3148 
3149 int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
3150 		      int size)
3151 {
3152 	return yaffs_do_xattrib_fetch(obj, name, value, size);
3153 }
3154 
3155 int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
3156 {
3157 	return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
3158 }
3159 
3160 static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
3161 {
3162 	u8 *buf;
3163 	struct yaffs_obj_hdr *oh;
3164 	struct yaffs_dev *dev;
3165 	struct yaffs_ext_tags tags;
3166 	int result;
3167 	int alloc_failed = 0;
3168 
3169 	if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
3170 		return;
3171 
3172 	dev = in->my_dev;
3173 	in->lazy_loaded = 0;
3174 	buf = yaffs_get_temp_buffer(dev);
3175 
3176 	result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
3177 	oh = (struct yaffs_obj_hdr *)buf;
3178 
3179 	in->yst_mode = oh->yst_mode;
3180 	yaffs_load_attribs(in, oh);
3181 	yaffs_set_obj_name_from_oh(in, oh);
3182 
3183 	if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
3184 		in->variant.symlink_variant.alias =
3185 		    yaffs_clone_str(oh->alias);
3186 		if (!in->variant.symlink_variant.alias)
3187 			alloc_failed = 1;	/* Not returned */
3188 	}
3189 	yaffs_release_temp_buffer(dev, buf);
3190 }
3191 
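/* Name storage in object headers.
 * With CONFIG_YAFFS_AUTO_UNICODE, a name that fits in ASCII is stored as
 * plain ASCII characters; a unicode name is stored from the second YCHAR
 * onwards with a leading 0 YCHAR as a marker. These two helpers convert
 * between the stored form and the in-memory YCHAR name.
 */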
3192 static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
3193 				    const YCHAR *oh_name, int buff_size)
3194 {
3195 #ifdef CONFIG_YAFFS_AUTO_UNICODE
3196 	if (dev->param.auto_unicode) {
3197 		if (*oh_name) {
3198 			/* It is an ASCII name, do an ASCII to
3199 			 * unicode conversion */
3200 			const char *ascii_oh_name = (const char *)oh_name;
3201 			int n = buff_size - 1;
3202 			while (n > 0 && *ascii_oh_name) {
3203 				*name = *ascii_oh_name;
3204 				name++;
3205 				ascii_oh_name++;
3206 				n--;
3207 			}
3208 		} else {
3209 			strncpy(name, oh_name + 1, buff_size - 1);
3210 		}
3211 	} else {
3212 #else
3213 	(void) dev;
3214 	{
3215 #endif
3216 		strncpy(name, oh_name, buff_size - 1);
3217 	}
3218 }
3219 
3220 static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
3221 				    const YCHAR *name)
3222 {
3223 #ifdef CONFIG_YAFFS_AUTO_UNICODE
3224 
3225 	int is_ascii;
3226 	YCHAR *w;
3227 
3228 	if (dev->param.auto_unicode) {
3229 
3230 		is_ascii = 1;
3231 		w = name;
3232 
3233 		/* Figure out if the name will fit in ascii character set */
3234 		while (is_ascii && *w) {
3235 			if ((*w) & 0xff00)
3236 				is_ascii = 0;
3237 			w++;
3238 		}
3239 
3240 		if (is_ascii) {
3241 			/* It is an ASCII name, so convert unicode to ascii */
3242 			char *ascii_oh_name = (char *)oh_name;
3243 			int n = YAFFS_MAX_NAME_LENGTH - 1;
3244 			while (n > 0 && *name) {
3245 				*ascii_oh_name = *name;
3246 				name++;
3247 				ascii_oh_name++;
3248 				n--;
3249 			}
3250 		} else {
3251 			/* Unicode name, so save starting at the second YCHAR */
3252 			*oh_name = 0;
3253 			strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
3254 		}
3255 	} else {
3256 #else
3257 	(void) dev;
3258 	{
3259 #endif
3260 		strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
3261 	}
3262 }
3263 
3264 /* yaffs_update_oh() updates the object header on NAND for an object.
3265  * If name is not NULL, then that new name is used.
3266  */
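/* The header is rebuilt in a temp buffer (preserving the old name when no
 * new one is given), written out as a new chunk with chunk_id 0 plus the
 * "extra" packed tags info, and the previous header chunk is then
 * deleted. For a shrink header the block holding the new chunk is flagged
 * with has_shrink_hdr.
 */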
3267 int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
3268 		    int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
3269 {
3270 
3271 	struct yaffs_block_info *bi;
3272 	struct yaffs_dev *dev = in->my_dev;
3273 	int prev_chunk_id;
3274 	int ret_val = 0;
3275 	int result = 0;
3276 	int new_chunk_id;
3277 	struct yaffs_ext_tags new_tags;
3278 	struct yaffs_ext_tags old_tags;
3279 	const YCHAR *alias = NULL;
3280 	u8 *buffer = NULL;
3281 	YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
3282 	struct yaffs_obj_hdr *oh = NULL;
3283 	loff_t file_size = 0;
3284 
3285 	strcpy(old_name, _Y("silly old name"));
3286 
3287 	if (in->fake && in != dev->root_dir && !force && !xmod)
3288 		return ret_val;
3289 
3290 	yaffs_check_gc(dev, 0);
3291 	yaffs_check_obj_details_loaded(in);
3292 
3293 	buffer = yaffs_get_temp_buffer(in->my_dev);
3294 	oh = (struct yaffs_obj_hdr *)buffer;
3295 
3296 	prev_chunk_id = in->hdr_chunk;
3297 
3298 	if (prev_chunk_id > 0) {
3299 		result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
3300 						  buffer, &old_tags);
3301 
3302 		yaffs_verify_oh(in, oh, &old_tags, 0);
3303 		memcpy(old_name, oh->name, sizeof(oh->name));
3304 		memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
3305 	} else {
3306 		memset(buffer, 0xff, dev->data_bytes_per_chunk);
3307 	}
3308 
3309 	oh->type = in->variant_type;
3310 	oh->yst_mode = in->yst_mode;
3311 	oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
3312 
3313 	yaffs_load_attribs_oh(oh, in);
3314 
3315 	if (in->parent)
3316 		oh->parent_obj_id = in->parent->obj_id;
3317 	else
3318 		oh->parent_obj_id = 0;
3319 
3320 	if (name && *name) {
3321 		memset(oh->name, 0, sizeof(oh->name));
3322 		yaffs_load_oh_from_name(dev, oh->name, name);
3323 	} else if (prev_chunk_id > 0) {
3324 		memcpy(oh->name, old_name, sizeof(oh->name));
3325 	} else {
3326 		memset(oh->name, 0, sizeof(oh->name));
3327 	}
3328 
3329 	oh->is_shrink = is_shrink;
3330 
3331 	switch (in->variant_type) {
3332 	case YAFFS_OBJECT_TYPE_UNKNOWN:
3333 		/* Should not happen */
3334 		break;
3335 	case YAFFS_OBJECT_TYPE_FILE:
3336 		if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
3337 		    oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
3338 			file_size = in->variant.file_variant.file_size;
3339 		yaffs_oh_size_load(oh, file_size);
3340 		break;
3341 	case YAFFS_OBJECT_TYPE_HARDLINK:
3342 		oh->equiv_id = in->variant.hardlink_variant.equiv_id;
3343 		break;
3344 	case YAFFS_OBJECT_TYPE_SPECIAL:
3345 		/* Do nothing */
3346 		break;
3347 	case YAFFS_OBJECT_TYPE_DIRECTORY:
3348 		/* Do nothing */
3349 		break;
3350 	case YAFFS_OBJECT_TYPE_SYMLINK:
3351 		alias = in->variant.symlink_variant.alias;
3352 		if (!alias)
3353 			alias = _Y("no alias");
3354 		strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
3355 		oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
3356 		break;
3357 	}
3358 
3359 	/* process any xattrib modifications */
3360 	if (xmod)
3361 		yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
3362 
3363 	/* Tags */
3364 	memset(&new_tags, 0, sizeof(new_tags));
3365 	in->serial++;
3366 	new_tags.chunk_id = 0;
3367 	new_tags.obj_id = in->obj_id;
3368 	new_tags.serial_number = in->serial;
3369 
3370 	/* Add extra info for file header */
3371 	new_tags.extra_available = 1;
3372 	new_tags.extra_parent_id = oh->parent_obj_id;
3373 	new_tags.extra_file_size = file_size;
3374 	new_tags.extra_is_shrink = oh->is_shrink;
3375 	new_tags.extra_equiv_id = oh->equiv_id;
3376 	new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
3377 	new_tags.extra_obj_type = in->variant_type;
3378 	yaffs_verify_oh(in, oh, &new_tags, 1);
3379 
3380 	/* Create new chunk in NAND */
3381 	new_chunk_id =
3382 	    yaffs_write_new_chunk(dev, buffer, &new_tags,
3383 				  (prev_chunk_id > 0) ? 1 : 0);
3384 
3385 	if (buffer)
3386 		yaffs_release_temp_buffer(dev, buffer);
3387 
3388 	if (new_chunk_id < 0)
3389 		return new_chunk_id;
3390 
3391 	in->hdr_chunk = new_chunk_id;
3392 
3393 	if (prev_chunk_id > 0)
3394 		yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3395 
3396 	if (!yaffs_obj_cache_dirty(in))
3397 		in->dirty = 0;
3398 
3399 	/* If this was a shrink, then mark the block
3400 	 * that the chunk lives on */
3401 	if (is_shrink) {
3402 		bi = yaffs_get_block_info(in->my_dev,
3403 					  new_chunk_id /
3404 					  in->my_dev->param.chunks_per_block);
3405 		bi->has_shrink_hdr = 1;
3406 	}
3407 
3408 
3409 	return new_chunk_id;
3410 }
3411 
3412 /*--------------------- File read/write ------------------------
3413  * Read and write have very similar structures.
3414  * In general the read/write has three parts to it
3415  * An incomplete chunk to start with (if the read/write is not chunk-aligned)
3416  * Some complete chunks
3417  * An incomplete chunk to end off with
3418  *
3419  * Curve-balls: the first chunk might also be the last chunk.
3420  */
3421 
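/* yaffs_file_rd() copies whole, aligned chunks straight into the caller's
 * buffer. Partial chunks (and all reads when inband tags are in use) go
 * through the chunk cache if caching is enabled, or a temp buffer if not.
 */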
3422 int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
3423 {
3424 	int chunk;
3425 	u32 start;
3426 	int n_copy;
3427 	int n = n_bytes;
3428 	int n_done = 0;
3429 	struct yaffs_cache *cache;
3430 	struct yaffs_dev *dev;
3431 
3432 	dev = in->my_dev;
3433 
3434 	while (n > 0) {
3435 		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
3436 		chunk++;
3437 
3438 		/* OK now check for the curveball where the start and end are in
3439 		 * the same chunk.
3440 		 */
3441 		if ((start + n) < dev->data_bytes_per_chunk)
3442 			n_copy = n;
3443 		else
3444 			n_copy = dev->data_bytes_per_chunk - start;
3445 
3446 		cache = yaffs_find_chunk_cache(in, chunk);
3447 
3448 		/* If the chunk is already in the cache or it is less than
3449 		 * a whole chunk or we're using inband tags then use the cache
3450 		 * (if there is caching) else bypass the cache.
3451 		 */
3452 		if (cache || n_copy != dev->data_bytes_per_chunk ||
3453 		    dev->param.inband_tags) {
3454 			if (dev->param.n_caches > 0) {
3455 
3456 				/* If we can't find the data in the cache,
3457 				 * then load it up. */
3458 
3459 				if (!cache) {
3460 					cache =
3461 					    yaffs_grab_chunk_cache(in->my_dev);
3462 					cache->object = in;
3463 					cache->chunk_id = chunk;
3464 					cache->dirty = 0;
3465 					cache->locked = 0;
3466 					yaffs_rd_data_obj(in, chunk,
3467 							  cache->data);
3468 					cache->n_bytes = 0;
3469 				}
3470 
3471 				yaffs_use_cache(dev, cache, 0);
3472 
3473 				cache->locked = 1;
3474 
3475 				memcpy(buffer, &cache->data[start], n_copy);
3476 
3477 				cache->locked = 0;
3478 			} else {
3479 				/* Read into the local buffer then copy.. */
3480 
3481 				u8 *local_buffer =
3482 				    yaffs_get_temp_buffer(dev);
3483 				yaffs_rd_data_obj(in, chunk, local_buffer);
3484 
3485 				memcpy(buffer, &local_buffer[start], n_copy);
3486 
3487 				yaffs_release_temp_buffer(dev, local_buffer);
3488 			}
3489 		} else {
3490 			/* A full chunk. Read directly into the buffer. */
3491 			yaffs_rd_data_obj(in, chunk, buffer);
3492 		}
3493 		n -= n_copy;
3494 		offset += n_copy;
3495 		buffer += n_copy;
3496 		n_done += n_copy;
3497 	}
3498 	return n_done;
3499 }
3500 
3501 int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
3502 		     int n_bytes, int write_through)
3503 {
3504 
3505 	int chunk;
3506 	u32 start;
3507 	int n_copy;
3508 	int n = n_bytes;
3509 	int n_done = 0;
3510 	int n_writeback;
3511 	loff_t start_write = offset;
3512 	int chunk_written = 0;
3513 	u32 n_bytes_read;
3514 	loff_t chunk_start;
3515 	struct yaffs_dev *dev;
3516 
3517 	dev = in->my_dev;
3518 
3519 	while (n > 0 && chunk_written >= 0) {
3520 		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
3521 
3522 		if (((loff_t)chunk) *
3523 		    dev->data_bytes_per_chunk + start != offset ||
3524 		    start >= dev->data_bytes_per_chunk) {
3525 			yaffs_trace(YAFFS_TRACE_ERROR,
3526 				"AddrToChunk of offset %lld gives chunk %d start %d",
3527 				offset, chunk, start);
3528 		}
3529 		chunk++;	/* File pos to chunk in file offset */
3530 
3531 		/* OK now check for the curveball where the start and end are in
3532 		 * the same chunk.
3533 		 */
3534 
3535 		if ((start + n) < dev->data_bytes_per_chunk) {
3536 			n_copy = n;
3537 
3538 			/* Now calculate how many bytes to write back....
3539 			 * If we're overwriting and not writing to the end of
3540 			 * file then we need to write back as much as was there
3541 			 * before.
3542 			 */
3543 
3544 			chunk_start = (((loff_t)(chunk - 1)) *
3545 					dev->data_bytes_per_chunk);
3546 
3547 			if (chunk_start > in->variant.file_variant.file_size)
3548 				n_bytes_read = 0;	/* Past end of file */
3549 			else
3550 				n_bytes_read =
3551 				    in->variant.file_variant.file_size -
3552 				    chunk_start;
3553 
3554 			if (n_bytes_read > dev->data_bytes_per_chunk)
3555 				n_bytes_read = dev->data_bytes_per_chunk;
3556 
3557 			n_writeback =
3558 			    (n_bytes_read >
3559 			     (start + n)) ? n_bytes_read : (start + n);
3560 
3561 			if (n_writeback < 0 ||
3562 			    n_writeback > dev->data_bytes_per_chunk)
3563 				BUG();
3564 
3565 		} else {
3566 			n_copy = dev->data_bytes_per_chunk - start;
3567 			n_writeback = dev->data_bytes_per_chunk;
3568 		}
3569 
3570 		if (n_copy != dev->data_bytes_per_chunk ||
3571 		    !dev->param.cache_bypass_aligned ||
3572 		    dev->param.inband_tags) {
3573 			/* An incomplete start or end chunk (or maybe both
3574 			 * start and end chunk), or we're using inband tags,
3575 			 * or we're forcing writes through the cache,
3576 			 * so we want to use the cache buffers.
3577 			 */
3578 			if (dev->param.n_caches > 0) {
3579 				struct yaffs_cache *cache;
3580 
3581 				/* If we can't find the data in the cache, then
3582 				 * load the cache */
3583 				cache = yaffs_find_chunk_cache(in, chunk);
3584 
3585 				if (!cache &&
3586 				    yaffs_check_alloc_available(dev, 1)) {
3587 					cache = yaffs_grab_chunk_cache(dev);
3588 					cache->object = in;
3589 					cache->chunk_id = chunk;
3590 					cache->dirty = 0;
3591 					cache->locked = 0;
3592 					yaffs_rd_data_obj(in, chunk,
3593 							  cache->data);
3594 				} else if (cache &&
3595 					   !cache->dirty &&
3596 					   !yaffs_check_alloc_available(dev,
3597 									1)) {
3598 					/* Drop the cache if it was a read cache
3599 					 * item and no space check has been made
3600 					 * for it.
3601 					 */
3602 					cache = NULL;
3603 				}
3604 
3605 				if (cache) {
3606 					yaffs_use_cache(dev, cache, 1);
3607 					cache->locked = 1;
3608 
3609 					memcpy(&cache->data[start], buffer,
3610 					       n_copy);
3611 
3612 					cache->locked = 0;
3613 					cache->n_bytes = n_writeback;
3614 
3615 					if (write_through) {
3616 						chunk_written =
3617 						    yaffs_wr_data_obj
3618 						    (cache->object,
3619 						     cache->chunk_id,
3620 						     cache->data,
3621 						     cache->n_bytes, 1);
3622 						cache->dirty = 0;
3623 					}
3624 				} else {
3625 					chunk_written = -1;	/* fail write */
3626 				}
3627 			} else {
3628 				/* An incomplete start or end chunk (or maybe
3629 				 * both start and end chunk). Read into the
3630 				 * local buffer then copy over and write back.
3631 				 */
3632 
3633 				u8 *local_buffer = yaffs_get_temp_buffer(dev);
3634 
3635 				yaffs_rd_data_obj(in, chunk, local_buffer);
3636 				memcpy(&local_buffer[start], buffer, n_copy);
3637 
3638 				chunk_written =
3639 				    yaffs_wr_data_obj(in, chunk,
3640 						      local_buffer,
3641 						      n_writeback, 0);
3642 
3643 				yaffs_release_temp_buffer(dev, local_buffer);
3644 			}
3645 		} else {
3646 			/* A full chunk. Write directly from the buffer. */
3647 
3648 			chunk_written =
3649 			    yaffs_wr_data_obj(in, chunk, buffer,
3650 					      dev->data_bytes_per_chunk, 0);
3651 
3652 			/* Since we've overwritten the cached data,
3653 			 * we better invalidate it. */
3654 			yaffs_invalidate_chunk_cache(in, chunk);
3655 		}
3656 
3657 		if (chunk_written >= 0) {
3658 			n -= n_copy;
3659 			offset += n_copy;
3660 			buffer += n_copy;
3661 			n_done += n_copy;
3662 		}
3663 	}
3664 
3665 	/* Update file object */
3666 
3667 	if ((start_write + n_done) > in->variant.file_variant.file_size)
3668 		in->variant.file_variant.file_size = (start_write + n_done);
3669 
3670 	in->dirty = 1;
3671 	return n_done;
3672 }
3673 
3674 int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
3675 		  int n_bytes, int write_through)
3676 {
3677 	yaffs2_handle_hole(in, offset);
3678 	return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
3679 }
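
/*
 * Illustrative sketch (guarded out, not part of the driver build): how a
 * caller might append a buffer to a file object through yaffs_wr_file().
 * "obj" is assumed to be an existing YAFFS_OBJECT_TYPE_FILE object.
 * write_through == 0 lets the short-op cache absorb partial chunks, while
 * write_through == 1 pushes each cached chunk straight to NAND.
 */
#if 0
static int example_append(struct yaffs_obj *obj, const u8 *data, int n_bytes)
{
	loff_t end = yaffs_get_obj_length(obj);
	int n_done = yaffs_wr_file(obj, data, end, n_bytes, 0);

	return (n_done == n_bytes) ? YAFFS_OK : YAFFS_FAIL;
}
#endif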
3680 
3681 /* ---------------------- File resizing stuff ------------------ */
3682 
3683 static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size)
3684 {
3685 
3686 	struct yaffs_dev *dev = in->my_dev;
3687 	loff_t old_size = in->variant.file_variant.file_size;
3688 	int i;
3689 	int chunk_id;
3690 	u32 dummy;
3691 	int last_del;
3692 	int start_del;
3693 
3694 	if (old_size > 0)
3695 		yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
3696 	else
3697 		last_del = 0;
3698 
3699 	yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
3700 				&start_del, &dummy);
3701 	last_del++;
3702 	start_del++;
3703 
3704 	/* Delete backwards so that we don't end up with holes if
3705 	 * power is lost part-way through the operation.
3706 	 */
3707 	for (i = last_del; i >= start_del; i--) {
3708 		/* NB this could be optimised somewhat,
3709 		 * eg. could retrieve the tags and write them without
3710 		 * using yaffs_chunk_del
3711 		 */
3712 
3713 		chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
3714 
3715 		if (chunk_id < 1)
3716 			continue;
3717 
3718 		if (chunk_id <
3719 		    (dev->internal_start_block * dev->param.chunks_per_block) ||
3720 		    chunk_id >=
3721 		    ((dev->internal_end_block + 1) *
3722 		      dev->param.chunks_per_block)) {
3723 			yaffs_trace(YAFFS_TRACE_ALWAYS,
3724 				"Found daft chunk_id %d for %d",
3725 				chunk_id, i);
3726 		} else {
3727 			in->n_data_chunks--;
3728 			yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
3729 		}
3730 	}
3731 }
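
/*
 * Worked example (assuming data_bytes_per_chunk == 2048): shrinking a file
 * from old_size == 10000 to new_size == 3000 gives last_del == 5 and
 * start_del == 3 (inode chunks are 1-based), so chunks 5, 4 and 3 are
 * deleted in that order.  Chunk 2, which still holds bytes 2048..2999, is
 * left alone here and is later rewritten with zero padding by
 * yaffs_resize_file_down().
 */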
3732 
3733 void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
3734 {
3735 	int new_full;
3736 	u32 new_partial;
3737 	struct yaffs_dev *dev = obj->my_dev;
3738 
3739 	yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
3740 
3741 	yaffs_prune_chunks(obj, new_size);
3742 
3743 	if (new_partial != 0) {
3744 		int last_chunk = 1 + new_full;
3745 		u8 *local_buffer = yaffs_get_temp_buffer(dev);
3746 
3747 		/* Rewrite the last chunk with its new size and zero pad */
3748 		yaffs_rd_data_obj(obj, last_chunk, local_buffer);
3749 		memset(local_buffer + new_partial, 0,
3750 		       dev->data_bytes_per_chunk - new_partial);
3751 
3752 		yaffs_wr_data_obj(obj, last_chunk, local_buffer,
3753 				  new_partial, 1);
3754 
3755 		yaffs_release_temp_buffer(dev, local_buffer);
3756 	}
3757 
3758 	obj->variant.file_variant.file_size = new_size;
3759 
3760 	yaffs_prune_tree(dev, &obj->variant.file_variant);
3761 }
3762 
3763 int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
3764 {
3765 	struct yaffs_dev *dev = in->my_dev;
3766 	loff_t old_size = in->variant.file_variant.file_size;
3767 
3768 	yaffs_flush_file_cache(in);
3769 	yaffs_invalidate_whole_cache(in);
3770 
3771 	yaffs_check_gc(dev, 0);
3772 
3773 	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
3774 		return YAFFS_FAIL;
3775 
3776 	if (new_size == old_size)
3777 		return YAFFS_OK;
3778 
3779 	if (new_size > old_size) {
3780 		yaffs2_handle_hole(in, new_size);
3781 		in->variant.file_variant.file_size = new_size;
3782 	} else {
3783 		/* new_size < old_size */
3784 		yaffs_resize_file_down(in, new_size);
3785 	}
3786 
3787 	/* Write a new object header to reflect the resize and,
3788 	 * if need be, show that we've shrunk the file.
3789 	 * Do this only if the file is not in the deleted directories
3790 	 * and is not shadowed.
3791 	 */
3792 	if (in->parent &&
3793 	    !in->is_shadowed &&
3794 	    in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
3795 	    in->parent->obj_id != YAFFS_OBJECTID_DELETED)
3796 		yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
3797 
3798 	return YAFFS_OK;
3799 }
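
/*
 * Illustrative sketch (guarded out, not part of the driver build):
 * truncating and then re-extending a file with yaffs_resize_file().
 * Growing a file only records a hole (see yaffs2_handle_hole()); shrinking
 * prunes the now-unused chunks and rewrites the final partial chunk.
 */
#if 0
static int example_truncate(struct yaffs_obj *obj)
{
	if (yaffs_resize_file(obj, 0) != YAFFS_OK)	/* discard all data */
		return YAFFS_FAIL;

	/* Re-extend: the new region is a hole and should read back as zeroes. */
	return yaffs_resize_file(obj, 4096);
}
#endif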
3800 
3801 int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
3802 {
3803 	if (!in->dirty)
3804 		return YAFFS_OK;
3805 
3806 	yaffs_flush_file_cache(in);
3807 
3808 	if (data_sync)
3809 		return YAFFS_OK;
3810 
3811 	if (update_time)
3812 		yaffs_load_current_time(in, 0, 0);
3813 
3814 	return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
3815 				YAFFS_OK : YAFFS_FAIL;
3816 }
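
/*
 * Illustrative sketch (guarded out, not part of the driver build): a typical
 * fsync-style call pattern.  data_sync == 1 flushes cached data but skips the
 * object header update; data_sync == 0 also rewrites the header (and, with
 * update_time == 1, refreshes the modification time first).
 */
#if 0
static int example_sync(struct yaffs_obj *obj, int datasync)
{
	return yaffs_flush_file(obj, 1, datasync);
}
#endif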
3817 
3818 
3819 /* yaffs_del_file deletes the whole file data
3820  * and the inode associated with the file.
3821  * It does not delete the links associated with the file.
3822  */
3823 static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
3824 {
3825 	int ret_val;
3826 	int del_now = 0;
3827 	struct yaffs_dev *dev = in->my_dev;
3828 
3829 	if (!in->my_inode)
3830 		del_now = 1;
3831 
3832 	if (del_now) {
3833 		ret_val =
3834 		    yaffs_change_obj_name(in, in->my_dev->del_dir,
3835 					  _Y("deleted"), 0, 0);
3836 		yaffs_trace(YAFFS_TRACE_TRACING,
3837 			"yaffs: immediate deletion of file %d",
3838 			in->obj_id);
3839 		in->deleted = 1;
3840 		in->my_dev->n_deleted_files++;
3841 		if (dev->param.disable_soft_del || dev->param.is_yaffs2)
3842 			yaffs_resize_file(in, 0);
3843 		yaffs_soft_del_file(in);
3844 	} else {
3845 		ret_val =
3846 		    yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
3847 					  _Y("unlinked"), 0, 0);
3848 	}
3849 	return ret_val;
3850 }
3851 
3852 static int yaffs_del_file(struct yaffs_obj *in)
3853 {
3854 	int ret_val = YAFFS_OK;
3855 	int deleted;	/* Need to cache value on stack if in is freed */
3856 	struct yaffs_dev *dev = in->my_dev;
3857 
3858 	if (dev->param.disable_soft_del || dev->param.is_yaffs2)
3859 		yaffs_resize_file(in, 0);
3860 
3861 	if (in->n_data_chunks > 0) {
3862 		/* Use soft deletion if there is data in the file.
3863 		 * That won't be the case if it has been resized to zero.
3864 		 */
3865 		if (!in->unlinked)
3866 			ret_val = yaffs_unlink_file_if_needed(in);
3867 
3868 		deleted = in->deleted;
3869 
3870 		if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
3871 			in->deleted = 1;
3872 			deleted = 1;
3873 			in->my_dev->n_deleted_files++;
3874 			yaffs_soft_del_file(in);
3875 		}
3876 		return deleted ? YAFFS_OK : YAFFS_FAIL;
3877 	} else {
3878 		/* The file has no data chunks so we toss it immediately */
3879 		yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
3880 		in->variant.file_variant.top = NULL;
3881 		yaffs_generic_obj_del(in);
3882 
3883 		return YAFFS_OK;
3884 	}
3885 }
3886 
3887 int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
3888 {
3889 	return (obj &&
3890 		obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
3891 		!(list_empty(&obj->variant.dir_variant.children));
3892 }
3893 
3894 static int yaffs_del_dir(struct yaffs_obj *obj)
3895 {
3896 	/* First check that the directory is empty. */
3897 	if (yaffs_is_non_empty_dir(obj))
3898 		return YAFFS_FAIL;
3899 
3900 	return yaffs_generic_obj_del(obj);
3901 }
3902 
3903 static int yaffs_del_symlink(struct yaffs_obj *in)
3904 {
3905 	kfree(in->variant.symlink_variant.alias);
3906 	in->variant.symlink_variant.alias = NULL;
3907 
3908 	return yaffs_generic_obj_del(in);
3909 }
3910 
3911 static int yaffs_del_link(struct yaffs_obj *in)
3912 {
3913 	/* remove this hardlink from the list associated with the equivalent
3914 	 * object
3915 	 */
3916 	list_del_init(&in->hard_links);
3917 	return yaffs_generic_obj_del(in);
3918 }
3919 
3920 int yaffs_del_obj(struct yaffs_obj *obj)
3921 {
3922 	int ret_val = -1;
3923 
3924 	switch (obj->variant_type) {
3925 	case YAFFS_OBJECT_TYPE_FILE:
3926 		ret_val = yaffs_del_file(obj);
3927 		break;
3928 	case YAFFS_OBJECT_TYPE_DIRECTORY:
3929 		if (!list_empty(&obj->variant.dir_variant.dirty)) {
3930 			yaffs_trace(YAFFS_TRACE_BACKGROUND,
3931 				"Remove object %d from dirty directories",
3932 				obj->obj_id);
3933 			list_del_init(&obj->variant.dir_variant.dirty);
3934 		}
3935 		return yaffs_del_dir(obj);
3936 		break;
3937 	case YAFFS_OBJECT_TYPE_SYMLINK:
3938 		ret_val = yaffs_del_symlink(obj);
3939 		break;
3940 	case YAFFS_OBJECT_TYPE_HARDLINK:
3941 		ret_val = yaffs_del_link(obj);
3942 		break;
3943 	case YAFFS_OBJECT_TYPE_SPECIAL:
3944 		ret_val = yaffs_generic_obj_del(obj);
3945 		break;
3946 	case YAFFS_OBJECT_TYPE_UNKNOWN:
3947 		ret_val = 0;
3948 		break;		/* should not happen. */
3949 	}
3950 	return ret_val;
3951 }
3952 
3953 static int yaffs_unlink_worker(struct yaffs_obj *obj)
3954 {
3955 	int del_now = 0;
3956 
3957 	if (!obj)
3958 		return YAFFS_FAIL;
3959 
3960 	if (!obj->my_inode)
3961 		del_now = 1;
3962 
3963 	yaffs_update_parent(obj->parent);
3964 
3965 	if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
3966 		return yaffs_del_link(obj);
3967 	} else if (!list_empty(&obj->hard_links)) {
3968 		/* Curve ball: We're unlinking an object that has a hardlink.
3969 		 *
3970 		 * This problem arises because we are not strictly following
3971 		 * the Linux link/inode model.
3972 		 *
3973 		 * We can't really delete the object.
3974 		 * Instead, we do the following:
3975 		 * - Select a hardlink.
3976 		 * - Unhook it from the hard links
3977 		 * - Move it from its parent directory so that the rename works.
3978 		 * - Rename the object to the hardlink's name.
3979 		 * - Delete the hardlink
3980 		 */
3981 
3982 		struct yaffs_obj *hl;
3983 		struct yaffs_obj *parent;
3984 		int ret_val;
3985 		YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
3986 
3987 		hl = list_entry(obj->hard_links.next, struct yaffs_obj,
3988 				hard_links);
3989 
3990 		yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
3991 		parent = hl->parent;
3992 
3993 		list_del_init(&hl->hard_links);
3994 
3995 		yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
3996 
3997 		ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
3998 
3999 		if (ret_val == YAFFS_OK)
4000 			ret_val = yaffs_generic_obj_del(hl);
4001 
4002 		return ret_val;
4003 
4004 	} else if (del_now) {
4005 		switch (obj->variant_type) {
4006 		case YAFFS_OBJECT_TYPE_FILE:
4007 			return yaffs_del_file(obj);
4008 			break;
4009 		case YAFFS_OBJECT_TYPE_DIRECTORY:
4010 			list_del_init(&obj->variant.dir_variant.dirty);
4011 			return yaffs_del_dir(obj);
4012 			break;
4013 		case YAFFS_OBJECT_TYPE_SYMLINK:
4014 			return yaffs_del_symlink(obj);
4015 			break;
4016 		case YAFFS_OBJECT_TYPE_SPECIAL:
4017 			return yaffs_generic_obj_del(obj);
4018 			break;
4019 		case YAFFS_OBJECT_TYPE_HARDLINK:
4020 		case YAFFS_OBJECT_TYPE_UNKNOWN:
4021 		default:
4022 			return YAFFS_FAIL;
4023 		}
4024 	} else if (yaffs_is_non_empty_dir(obj)) {
4025 		return YAFFS_FAIL;
4026 	} else {
4027 		return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
4028 						_Y("unlinked"), 0, 0);
4029 	}
4030 }
4031 
4032 static int yaffs_unlink_obj(struct yaffs_obj *obj)
4033 {
4034 	if (obj && obj->unlink_allowed)
4035 		return yaffs_unlink_worker(obj);
4036 
4037 	return YAFFS_FAIL;
4038 }
4039 
4040 int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
4041 {
4042 	struct yaffs_obj *obj;
4043 
4044 	obj = yaffs_find_by_name(dir, name);
4045 	return yaffs_unlink_obj(obj);
4046 }
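
/*
 * Illustrative sketch (guarded out, not part of the driver build): unlinking
 * an object by name, as an OS glue layer would do for unlink(2)/rmdir(2).
 * The parent directory here is simply the root directory; a real caller
 * would pass whatever directory object the OS resolved.
 */
#if 0
static int example_unlink(struct yaffs_dev *dev, const YCHAR *name)
{
	return yaffs_unlinker(dev->root_dir, name);
}
#endif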
4047 
4048 /* Note:
4049  * If old_name is NULL then we take old_dir as the object to be renamed.
4050  */
4051 int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
4052 		     struct yaffs_obj *new_dir, const YCHAR *new_name)
4053 {
4054 	struct yaffs_obj *obj = NULL;
4055 	struct yaffs_obj *existing_target = NULL;
4056 	int force = 0;
4057 	int result;
4058 	struct yaffs_dev *dev;
4059 
4060 	if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
4061 		BUG();
4062 		return YAFFS_FAIL;
4063 	}
4064 	if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
4065 		BUG();
4066 		return YAFFS_FAIL;
4067 	}
4068 
4069 	dev = old_dir->my_dev;
4070 
4071 #ifdef CONFIG_YAFFS_CASE_INSENSITIVE
4072 	/* Special case for case-insensitive systems.
4073 	 * While look-up is case insensitive, the name isn't.
4074 	 * Therefore we might want to change x.txt to X.txt
4075 	 */
4076 	if (old_dir == new_dir &&
4077 		old_name && new_name &&
4078 		strcmp(old_name, new_name) == 0)
4079 		force = 1;
4080 #endif
4081 
4082 	if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
4083 	    YAFFS_MAX_NAME_LENGTH)
4084 		/* ENAMETOOLONG */
4085 		return YAFFS_FAIL;
4086 
4087 	if (old_name)
4088 		obj = yaffs_find_by_name(old_dir, old_name);
4089 	else {
4090 		obj = old_dir;
4091 		old_dir = obj->parent;
4092 	}
4093 
4094 	if (obj && obj->rename_allowed) {
4095 		/* Now handle an existing target, if there is one */
4096 		existing_target = yaffs_find_by_name(new_dir, new_name);
4097 		if (yaffs_is_non_empty_dir(existing_target)) {
4098 			return YAFFS_FAIL;	/* ENOTEMPTY */
4099 		} else if (existing_target && existing_target != obj) {
4100 			/* Nuke the target first, using shadowing,
4101 			 * but only if it isn't the same object.
4102 			 *
4103 			 * Note we must disable gc here otherwise it can mess
4104 			 * up the shadowing.
4105 			 *
4106 			 */
4107 			dev->gc_disable = 1;
4108 			yaffs_change_obj_name(obj, new_dir, new_name, force,
4109 					      existing_target->obj_id);
4110 			existing_target->is_shadowed = 1;
4111 			yaffs_unlink_obj(existing_target);
4112 			dev->gc_disable = 0;
4113 		}
4114 
4115 		result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
4116 
4117 		yaffs_update_parent(old_dir);
4118 		if (new_dir != old_dir)
4119 			yaffs_update_parent(new_dir);
4120 
4121 		return result;
4122 	}
4123 	return YAFFS_FAIL;
4124 }
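
/*
 * Illustrative sketch (guarded out, not part of the driver build): renaming
 * within a single directory.  If the target name already exists it is
 * shadowed and unlinked first (see above), so the rename appears atomic even
 * across a power failure.  _Y() literals and YCHAR handling are as used
 * elsewhere in this file.
 */
#if 0
static int example_rename(struct yaffs_dev *dev,
			  const YCHAR *old_name, const YCHAR *new_name)
{
	return yaffs_rename_obj(dev->root_dir, old_name,
				dev->root_dir, new_name);
}
#endif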
4125 
4126 /*----------------------- Initialisation Scanning ---------------------- */
4127 
4128 void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
4129 			       int backward_scanning)
4130 {
4131 	struct yaffs_obj *obj;
4132 
4133 	if (backward_scanning) {
4134 		/* Handle YAFFS2 case (backward scanning)
4135 		 * If the shadowed object exists then ignore.
4136 		 */
4137 		obj = yaffs_find_by_number(dev, obj_id);
4138 		if (obj)
4139 			return;
4140 	}
4141 
4142 	/* Create it (if it does not exist), assuming it is a file, so that
4143 	 * it can handle shrinking etc.
4144 	 * We put it in the unlinked dir so it gets cleaned up after scanning.
4145 	 */
4146 	obj =
4147 	    yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
4148 	if (!obj)
4149 		return;
4150 	obj->is_shadowed = 1;
4151 	yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
4152 	obj->variant.file_variant.shrink_size = 0;
4153 	obj->valid = 1;		/* So that we don't read any other info. */
4154 }
4155 
4156 void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
4157 {
4158 	struct list_head *lh;
4159 	struct list_head *save;
4160 	struct yaffs_obj *hl;
4161 	struct yaffs_obj *in;
4162 
4163 	list_for_each_safe(lh, save, hard_list) {
4164 		hl = list_entry(lh, struct yaffs_obj, hard_links);
4165 		in = yaffs_find_by_number(dev,
4166 					hl->variant.hardlink_variant.equiv_id);
4167 
4168 		if (in) {
4169 			/* Add the hardlink pointers */
4170 			hl->variant.hardlink_variant.equiv_obj = in;
4171 			list_add(&hl->hard_links, &in->hard_links);
4172 		} else {
4173 			/* TODO: need to report/handle this better.
4174 			 * Got a problem... hardlink to a non-existent object.
4175 			 */
4176 			hl->variant.hardlink_variant.equiv_obj = NULL;
4177 			INIT_LIST_HEAD(&hl->hard_links);
4178 		}
4179 	}
4180 }
4181 
4182 static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
4183 {
4184 	/*
4185 	 *  Sort out state of unlinked and deleted objects after scanning.
4186 	 */
4187 	struct list_head *i;
4188 	struct list_head *n;
4189 	struct yaffs_obj *l;
4190 
4191 	if (dev->read_only)
4192 		return;
4193 
4194 	/* Soft delete all the unlinked files */
4195 	list_for_each_safe(i, n,
4196 			   &dev->unlinked_dir->variant.dir_variant.children) {
4197 		l = list_entry(i, struct yaffs_obj, siblings);
4198 		yaffs_del_obj(l);
4199 	}
4200 
4201 	list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
4202 		l = list_entry(i, struct yaffs_obj, siblings);
4203 		yaffs_del_obj(l);
4204 	}
4205 }
4206 
4207 /*
4208  * This code iterates through all the objects making sure that they are rooted.
4209  * Any unrooted objects are re-rooted in lost+found.
4210  * An object needs to be in one of:
4211  * - directly under the deleted or unlinked directories, or
4212  * - directly or indirectly under root.
4213  *
4214  * Note:
4215  *  This code assumes that we don't ever change the current relationships
4216  *  between directories:
4217  *   root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
4218  *   lost-n-found->parent == root_dir
4219  *
4220  * This fixes the problem where directories might have inadvertently been
4221  * deleted leaving the object "hanging" without being rooted in the
4222  * directory tree.
4223  */
4224 
4225 static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
4226 {
4227 	return (obj == dev->del_dir ||
4228 		obj == dev->unlinked_dir || obj == dev->root_dir);
4229 }
4230 
4231 static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
4232 {
4233 	struct yaffs_obj *obj;
4234 	struct yaffs_obj *parent;
4235 	int i;
4236 	struct list_head *lh;
4237 	struct list_head *n;
4238 	int depth_limit;
4239 	int hanging;
4240 
4241 	if (dev->read_only)
4242 		return;
4243 
4244 	/* Iterate through the objects in each hash entry,
4245 	 * looking at each object.
4246 	 * Make sure it is rooted.
4247 	 */
4248 
4249 	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
4250 		list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
4251 			obj = list_entry(lh, struct yaffs_obj, hash_link);
4252 			parent = obj->parent;
4253 
4254 			if (yaffs_has_null_parent(dev, obj)) {
4255 				/* These directories are not hanging */
4256 				hanging = 0;
4257 			} else if (!parent ||
4258 				   parent->variant_type !=
4259 				   YAFFS_OBJECT_TYPE_DIRECTORY) {
4260 				hanging = 1;
4261 			} else if (yaffs_has_null_parent(dev, parent)) {
4262 				hanging = 0;
4263 			} else {
4264 				/*
4265 				 * Need to follow the parent chain to
4266 				 * see if it is hanging.
4267 				 */
4268 				hanging = 0;
4269 				depth_limit = 100;
4270 
4271 				while (parent != dev->root_dir &&
4272 				       parent->parent &&
4273 				       parent->parent->variant_type ==
4274 				       YAFFS_OBJECT_TYPE_DIRECTORY &&
4275 				       depth_limit > 0) {
4276 					parent = parent->parent;
4277 					depth_limit--;
4278 				}
4279 				if (parent != dev->root_dir)
4280 					hanging = 1;
4281 			}
4282 			if (hanging) {
4283 				yaffs_trace(YAFFS_TRACE_SCAN,
4284 					"Hanging object %d moved to lost and found",
4285 					obj->obj_id);
4286 				yaffs_add_obj_to_dir(dev->lost_n_found, obj);
4287 			}
4288 		}
4289 	}
4290 }
4291 
4292 /*
4293  * Delete directory contents for cleaning up lost and found.
4294  */
4295 static void yaffs_del_dir_contents(struct yaffs_obj *dir)
4296 {
4297 	struct yaffs_obj *obj;
4298 	struct list_head *lh;
4299 	struct list_head *n;
4300 
4301 	if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
4302 		BUG();
4303 
4304 	list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
4305 		obj = list_entry(lh, struct yaffs_obj, siblings);
4306 		if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
4307 			yaffs_del_dir_contents(obj);
4308 		yaffs_trace(YAFFS_TRACE_SCAN,
4309 			"Deleting lost_found object %d",
4310 			obj->obj_id);
4311 		yaffs_unlink_obj(obj);
4312 	}
4313 }
4314 
4315 static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
4316 {
4317 	yaffs_del_dir_contents(dev->lost_n_found);
4318 }
4319 
4320 
4321 struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
4322 				     const YCHAR *name)
4323 {
4324 	int sum;
4325 	struct list_head *i;
4326 	YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
4327 	struct yaffs_obj *l;
4328 
4329 	if (!name)
4330 		return NULL;
4331 
4332 	if (!directory) {
4333 		yaffs_trace(YAFFS_TRACE_ALWAYS,
4334 			"tragedy: yaffs_find_by_name: null pointer directory"
4335 			);
4336 		BUG();
4337 		return NULL;
4338 	}
4339 	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
4340 		yaffs_trace(YAFFS_TRACE_ALWAYS,
4341 			"tragedy: yaffs_find_by_name: non-directory"
4342 			);
4343 		BUG();
4344 	}
4345 
4346 	sum = yaffs_calc_name_sum(name);
4347 
4348 	list_for_each(i, &directory->variant.dir_variant.children) {
4349 		l = list_entry(i, struct yaffs_obj, siblings);
4350 
4351 		if (l->parent != directory)
4352 			BUG();
4353 
4354 		yaffs_check_obj_details_loaded(l);
4355 
4356 		/* Special case for lost-n-found */
4357 		if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
4358 			if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
4359 				return l;
4360 		} else if (l->sum == sum || l->hdr_chunk <= 0) {
4361 			/* Name sum matches, or the object has no header chunk
4362 			 * (i.e. it has a made-up objnnn name): do a real check.
4363 			 */
4364 			yaffs_get_obj_name(l, buffer,
4365 				YAFFS_MAX_NAME_LENGTH + 1);
4366 			if (!strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH))
4367 				return l;
4368 		}
4369 	}
4370 	return NULL;
4371 }
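
/*
 * Illustrative sketch (guarded out, not part of the driver build): looking an
 * object up by name and following any hardlink to the real object, as the
 * lookup path in an OS glue layer might do.
 */
#if 0
static struct yaffs_obj *example_lookup(struct yaffs_obj *dir,
					const YCHAR *name)
{
	struct yaffs_obj *obj = yaffs_find_by_name(dir, name);

	return obj ? yaffs_get_equivalent_obj(obj) : NULL;
}
#endif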
4372 
4373 /* GetEquivalentObject dereferences any hard links to get to the
4374  * actual object.
4375  */
4376 
4377 struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
4378 {
4379 	if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
4380 		obj = obj->variant.hardlink_variant.equiv_obj;
4381 		yaffs_check_obj_details_loaded(obj);
4382 	}
4383 	return obj;
4384 }
4385 
4386 /*
4387  *  A note or two on object names.
4388  *  * If the object name is missing, we then make one up in the form objnnn
4389  *
4390  *  * ASCII names are stored in the object header's name field from byte zero
4391  *  * Unicode names are historically stored starting from byte zero.
4392  *
4393  * Then there are automatic Unicode names...
4394  * The purpose of these is to save names in a way that can be read as
4395  * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
4396  * system to share files.
4397  *
4398  * These automatic Unicode names are stored slightly differently:
4399  *  - If the name can fit in the ASCII character space then it is saved as
4400  *    an ASCII name as per above.
4401  *  - If the name needs Unicode then the name is saved in Unicode
4402  *    starting at oh->name[1].
4403  *
4404  */
4405 static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
4406 				int buffer_size)
4407 {
4408 	/* Create an object name if we could not find one. */
4409 	if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
4410 		YCHAR local_name[20];
4411 		YCHAR num_string[20];
4412 		YCHAR *x = &num_string[19];
4413 		unsigned v = obj->obj_id;
4414 		num_string[19] = 0;
4415 		while (v > 0) {
4416 			x--;
4417 			*x = '0' + (v % 10);
4418 			v /= 10;
4419 		}
4420 		/* make up a name */
4421 		strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
4422 		strcat(local_name, x);
4423 		strncpy(name, local_name, buffer_size - 1);
4424 	}
4425 }
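
/*
 * Worked example: for an object with obj_id == 42 and an empty name, the
 * digits are peeled off least-significant first into num_string ("42"), and
 * the made-up name becomes YAFFS_LOSTNFOUND_PREFIX followed by "42",
 * truncated to fit buffer_size.
 */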
4426 
4427 int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
4428 {
4429 	memset(name, 0, buffer_size * sizeof(YCHAR));
4430 	yaffs_check_obj_details_loaded(obj);
4431 	if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
4432 		strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
4433 	} else if (obj->short_name[0]) {
4434 		strcpy(name, obj->short_name);
4435 	} else if (obj->hdr_chunk > 0) {
4436 		int result;
4437 		u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
4438 
4439 		struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
4440 
4441 		memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
4442 
4443 		if (obj->hdr_chunk > 0) {
4444 			result = yaffs_rd_chunk_tags_nand(obj->my_dev,
4445 							  obj->hdr_chunk,
4446 							  buffer, NULL);
4447 		}
4448 		yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
4449 					buffer_size);
4450 
4451 		yaffs_release_temp_buffer(obj->my_dev, buffer);
4452 	}
4453 
4454 	yaffs_fix_null_name(obj, name, buffer_size);
4455 
4456 	return strnlen(name, YAFFS_MAX_NAME_LENGTH);
4457 }
4458 
4459 loff_t yaffs_get_obj_length(struct yaffs_obj *obj)
4460 {
4461 	/* Dereference any hard linking */
4462 	obj = yaffs_get_equivalent_obj(obj);
4463 
4464 	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
4465 		return obj->variant.file_variant.file_size;
4466 	if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
4467 		if (!obj->variant.symlink_variant.alias)
4468 			return 0;
4469 		return strnlen(obj->variant.symlink_variant.alias,
4470 				     YAFFS_MAX_ALIAS_LENGTH);
4471 	} else {
4472 		/* Only a directory should drop through to here */
4473 		return obj->my_dev->data_bytes_per_chunk;
4474 	}
4475 }
4476 
4477 int yaffs_get_obj_link_count(struct yaffs_obj *obj)
4478 {
4479 	int count = 0;
4480 	struct list_head *i;
4481 
4482 	if (!obj->unlinked)
4483 		count++;	/* the object itself */
4484 
4485 	list_for_each(i, &obj->hard_links)
4486 	    count++;		/* add the hard links; */
4487 
4488 	return count;
4489 }
4490 
4491 int yaffs_get_obj_inode(struct yaffs_obj *obj)
4492 {
4493 	obj = yaffs_get_equivalent_obj(obj);
4494 
4495 	return obj->obj_id;
4496 }
4497 
4498 unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
4499 {
4500 	obj = yaffs_get_equivalent_obj(obj);
4501 
4502 	switch (obj->variant_type) {
4503 	case YAFFS_OBJECT_TYPE_FILE:
4504 		return DT_REG;
4505 		break;
4506 	case YAFFS_OBJECT_TYPE_DIRECTORY:
4507 		return DT_DIR;
4508 		break;
4509 	case YAFFS_OBJECT_TYPE_SYMLINK:
4510 		return DT_LNK;
4511 		break;
4512 	case YAFFS_OBJECT_TYPE_HARDLINK:
4513 		return DT_REG;
4514 		break;
4515 	case YAFFS_OBJECT_TYPE_SPECIAL:
4516 		if (S_ISFIFO(obj->yst_mode))
4517 			return DT_FIFO;
4518 		if (S_ISCHR(obj->yst_mode))
4519 			return DT_CHR;
4520 		if (S_ISBLK(obj->yst_mode))
4521 			return DT_BLK;
4522 		if (S_ISSOCK(obj->yst_mode))
4523 			return DT_SOCK;
4524 		return DT_REG;
4525 		break;
4526 	default:
4527 		return DT_REG;
4528 		break;
4529 	}
4530 }
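
/*
 * Illustrative sketch (guarded out, not part of the driver build): a
 * readdir-style walk of a directory using the accessors above.  "emit" is a
 * hypothetical callback supplied by the caller.
 */
#if 0
static void example_list_dir(struct yaffs_obj *dir,
			     void (*emit)(const YCHAR *name, unsigned dtype,
					  loff_t length))
{
	struct list_head *i;
	struct yaffs_obj *l;
	YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];

	list_for_each(i, &dir->variant.dir_variant.children) {
		l = list_entry(i, struct yaffs_obj, siblings);
		yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
		emit(name, yaffs_get_obj_type(l), yaffs_get_obj_length(l));
	}
}
#endif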
4531 
4532 YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
4533 {
4534 	obj = yaffs_get_equivalent_obj(obj);
4535 	if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
4536 		return yaffs_clone_str(obj->variant.symlink_variant.alias);
4537 	else
4538 		return yaffs_clone_str(_Y(""));
4539 }
4540 
4541 /*--------------------------- Initialisation code -------------------------- */
4542 
4543 static int yaffs_check_dev_fns(struct yaffs_dev *dev)
4544 {
4545 	struct yaffs_driver *drv = &dev->drv;
4546 	struct yaffs_tags_handler *tagger = &dev->tagger;
4547 
4548 	/* Common functions that every driver must provide */
4549 	if (!drv->drv_read_chunk_fn ||
4550 	    !drv->drv_write_chunk_fn ||
4551 	    !drv->drv_erase_fn)
4552 		return 0;
4553 
4554 	if (dev->param.is_yaffs2 &&
4555 	     (!drv->drv_mark_bad_fn  || !drv->drv_check_bad_fn))
4556 		return 0;
4557 
4558 	/* Install the default tags marshalling functions if needed. */
4559 	yaffs_tags_compat_install(dev);
4560 	yaffs_tags_marshall_install(dev);
4561 
4562 	/* Check we now have the marshalling functions required. */
4563 	if (!tagger->write_chunk_tags_fn ||
4564 	    !tagger->read_chunk_tags_fn ||
4565 	    !tagger->query_block_fn ||
4566 	    !tagger->mark_bad_fn)
4567 		return 0;
4568 
4569 	return 1;
4570 }
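
/*
 * Illustrative sketch (guarded out, not part of the driver build): the
 * low-level hooks a flash driver is expected to fill in before
 * yaffs_guts_initialise() is called.  The my_nand_* functions are
 * hypothetical; the required signatures are defined in yaffs_guts.h.
 */
#if 0
static void example_install_driver(struct yaffs_dev *dev)
{
	dev->drv.drv_read_chunk_fn = my_nand_read_chunk;
	dev->drv.drv_write_chunk_fn = my_nand_write_chunk;
	dev->drv.drv_erase_fn = my_nand_erase_block;
	/* Bad-block handling is mandatory for yaffs2. */
	dev->drv.drv_mark_bad_fn = my_nand_mark_bad;
	dev->drv.drv_check_bad_fn = my_nand_check_bad;
	/* Default tags marshalling is installed by yaffs_check_dev_fns(). */
}
#endif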
4571 
4572 static int yaffs_create_initial_dir(struct yaffs_dev *dev)
4573 {
4574 	/* Initialise the unlinked, deleted, root and lost+found directories */
4575 	dev->lost_n_found = dev->root_dir = NULL;
4576 	dev->unlinked_dir = dev->del_dir = NULL;
4577 	dev->unlinked_dir =
4578 	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
4579 	dev->del_dir =
4580 	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
4581 	dev->root_dir =
4582 	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
4583 				  YAFFS_ROOT_MODE | S_IFDIR);
4584 	dev->lost_n_found =
4585 	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
4586 				  YAFFS_LOSTNFOUND_MODE | S_IFDIR);
4587 
4588 	if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
4589 	    && dev->del_dir) {
4590 		yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
4591 		return YAFFS_OK;
4592 	}
4593 	return YAFFS_FAIL;
4594 }
4595 
4596 int yaffs_guts_initialise(struct yaffs_dev *dev)
4597 {
4598 	int init_failed = 0;
4599 	unsigned x;
4600 	int bits;
4601 
4602 	yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()");
4603 
4604 	/* Check stuff that must be set */
4605 
4606 	if (!dev) {
4607 		yaffs_trace(YAFFS_TRACE_ALWAYS,
4608 			"yaffs: Need a device"
4609 			);
4610 		return YAFFS_FAIL;
4611 	}
4612 
4613 	if (dev->is_mounted) {
4614 		yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
4615 		return YAFFS_FAIL;
4616 	}
4617 
4618 	dev->internal_start_block = dev->param.start_block;
4619 	dev->internal_end_block = dev->param.end_block;
4620 	dev->block_offset = 0;
4621 	dev->chunk_offset = 0;
4622 	dev->n_free_chunks = 0;
4623 
4624 	dev->gc_block = 0;
4625 
4626 	if (dev->param.start_block == 0) {
4627 		dev->internal_start_block = dev->param.start_block + 1;
4628 		dev->internal_end_block = dev->param.end_block + 1;
4629 		dev->block_offset = 1;
4630 		dev->chunk_offset = dev->param.chunks_per_block;
4631 	}
4632 
4633 	/* Check geometry parameters. */
4634 
4635 	if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
4636 		dev->param.total_bytes_per_chunk < 1024) ||
4637 		(!dev->param.is_yaffs2 &&
4638 			dev->param.total_bytes_per_chunk < 512) ||
4639 		(dev->param.inband_tags && !dev->param.is_yaffs2) ||
4640 		 dev->param.chunks_per_block < 2 ||
4641 		 dev->param.n_reserved_blocks < 2 ||
4642 		dev->internal_start_block <= 0 ||
4643 		dev->internal_end_block <= 0 ||
4644 		dev->internal_end_block <=
4645 		(dev->internal_start_block + dev->param.n_reserved_blocks + 2)
4646 		) {
4647 		/* otherwise it is too small */
4648 		yaffs_trace(YAFFS_TRACE_ALWAYS,
4649 			"NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
4650 			dev->param.total_bytes_per_chunk,
4651 			dev->param.is_yaffs2 ? "2" : "",
4652 			dev->param.inband_tags);
4653 		return YAFFS_FAIL;
4654 	}
4655 
4656 	if (yaffs_init_nand(dev) != YAFFS_OK) {
4657 		yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
4658 		return YAFFS_FAIL;
4659 	}
4660 
4661 	/* Sort out space for inband tags, if required */
4662 	if (dev->param.inband_tags)
4663 		dev->data_bytes_per_chunk =
4664 		    dev->param.total_bytes_per_chunk -
4665 		    sizeof(struct yaffs_packed_tags2_tags_only);
4666 	else
4667 		dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
4668 
4669 	/* Got the right mix of functions? */
4670 	if (!yaffs_check_dev_fns(dev)) {
4671 		/* Function missing */
4672 		yaffs_trace(YAFFS_TRACE_ALWAYS,
4673 			"device function(s) missing or wrong");
4674 
4675 		return YAFFS_FAIL;
4676 	}
4677 
4678 	/* Finished with most checks. Further checks happen later on too. */
4679 
4680 	dev->is_mounted = 1;
4681 
4682 	/* OK now calculate a few things for the device */
4683 
4684 	/*
4685 	 *  Calculate all the chunk size manipulation numbers:
4686 	 */
4687 	x = dev->data_bytes_per_chunk;
4688 	/* We always use dev->chunk_shift and dev->chunk_div */
4689 	dev->chunk_shift = calc_shifts(x);
4690 	x >>= dev->chunk_shift;
4691 	dev->chunk_div = x;
4692 	/* We only use chunk mask if chunk_div is 1 */
4693 	dev->chunk_mask = (1 << dev->chunk_shift) - 1;
4694 
4695 	/*
4696 	 * Calculate chunk_grp_bits.
4697 	 * We need the next power of 2 >= the total number of chunks.
4698 	 */
4699 
4700 	x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
4701 
4702 	bits = calc_shifts_ceiling(x);
4703 
4704 	/* Set up tnode width if wide tnodes are enabled. */
4705 	if (!dev->param.wide_tnodes_disabled) {
4706 		/* bits must be even so that we end up with 32-bit words */
4707 		if (bits & 1)
4708 			bits++;
4709 		if (bits < 16)
4710 			dev->tnode_width = 16;
4711 		else
4712 			dev->tnode_width = bits;
4713 	} else {
4714 		dev->tnode_width = 16;
4715 	}
4716 
4717 	dev->tnode_mask = (1 << dev->tnode_width) - 1;
4718 
4719 	/* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
4720 	 * so if the bitwidth of the
4721 	 * chunk range we're using is greater than the tnode width we need
4722 	 * to figure out chunk_grp_bits and chunk_grp_size.
4723 	 */
4724 
4725 	if (bits <= dev->tnode_width)
4726 		dev->chunk_grp_bits = 0;
4727 	else
4728 		dev->chunk_grp_bits = bits - dev->tnode_width;
4729 
4730 	dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
4731 	if (dev->tnode_size < sizeof(struct yaffs_tnode))
4732 		dev->tnode_size = sizeof(struct yaffs_tnode);
4733 
4734 	dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
4735 
4736 	if (dev->param.chunks_per_block < dev->chunk_grp_size) {
4737 		/* We have a problem because the soft delete won't work if
4738 		 * the chunk group size > chunks per block.
4739 		 * This can be remedied by using larger "virtual blocks".
4740 		 */
4741 		yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
4742 
4743 		return YAFFS_FAIL;
4744 	}
4745 
4746 	/* Finished verifying the device, continue with initialisation */
4747 
4748 	/* More device initialisation */
4749 	dev->all_gcs = 0;
4750 	dev->passive_gc_count = 0;
4751 	dev->oldest_dirty_gc_count = 0;
4752 	dev->bg_gcs = 0;
4753 	dev->gc_block_finder = 0;
4754 	dev->buffered_block = -1;
4755 	dev->doing_buffered_block_rewrite = 0;
4756 	dev->n_deleted_files = 0;
4757 	dev->n_bg_deletions = 0;
4758 	dev->n_unlinked_files = 0;
4759 	dev->n_ecc_fixed = 0;
4760 	dev->n_ecc_unfixed = 0;
4761 	dev->n_tags_ecc_fixed = 0;
4762 	dev->n_tags_ecc_unfixed = 0;
4763 	dev->n_erase_failures = 0;
4764 	dev->n_erased_blocks = 0;
4765 	dev->gc_disable = 0;
4766 	dev->has_pending_prioritised_gc = 1;
4767 		/* Assume the worst for now, will get fixed on first GC */
4768 	INIT_LIST_HEAD(&dev->dirty_dirs);
4769 	dev->oldest_dirty_seq = 0;
4770 	dev->oldest_dirty_block = 0;
4771 
4772 	/* Initialise temporary buffers and caches. */
4773 	if (!yaffs_init_tmp_buffers(dev))
4774 		init_failed = 1;
4775 
4776 	dev->cache = NULL;
4777 	dev->gc_cleanup_list = NULL;
4778 
4779 	if (!init_failed && dev->param.n_caches > 0) {
4780 		int i;
4781 		void *buf;
4782 		int cache_bytes =
4783 		    dev->param.n_caches * sizeof(struct yaffs_cache);
4784 
4785 		if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
4786 			dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
4787 
4788 		dev->cache = kmalloc(cache_bytes, GFP_NOFS);
4789 
4790 		buf = (u8 *) dev->cache;
4791 
4792 		if (dev->cache)
4793 			memset(dev->cache, 0, cache_bytes);
4794 
4795 		for (i = 0; i < dev->param.n_caches && buf; i++) {
4796 			dev->cache[i].object = NULL;
4797 			dev->cache[i].last_use = 0;
4798 			dev->cache[i].dirty = 0;
4799 			dev->cache[i].data = buf =
4800 			    kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
4801 		}
4802 		if (!buf)
4803 			init_failed = 1;
4804 
4805 		dev->cache_last_use = 0;
4806 	}
4807 
4808 	dev->cache_hits = 0;
4809 
4810 	if (!init_failed) {
4811 		dev->gc_cleanup_list =
4812 		    kmalloc(dev->param.chunks_per_block * sizeof(u32),
4813 					GFP_NOFS);
4814 		if (!dev->gc_cleanup_list)
4815 			init_failed = 1;
4816 	}
4817 
4818 	if (dev->param.is_yaffs2)
4819 		dev->param.use_header_file_size = 1;
4820 
4821 	if (!init_failed && !yaffs_init_blocks(dev))
4822 		init_failed = 1;
4823 
4824 	yaffs_init_tnodes_and_objs(dev);
4825 
4826 	if (!init_failed && !yaffs_create_initial_dir(dev))
4827 		init_failed = 1;
4828 
4829 	if (!init_failed && dev->param.is_yaffs2 &&
4830 		!dev->param.disable_summary &&
4831 		!yaffs_summary_init(dev))
4832 		init_failed = 1;
4833 
4834 	if (!init_failed) {
4835 		/* Now scan the flash. */
4836 		if (dev->param.is_yaffs2) {
4837 			if (yaffs2_checkpt_restore(dev)) {
4838 				yaffs_check_obj_details_loaded(dev->root_dir);
4839 				yaffs_trace(YAFFS_TRACE_CHECKPOINT |
4840 					YAFFS_TRACE_MOUNT,
4841 					"yaffs: restored from checkpoint"
4842 					);
4843 			} else {
4844 
4845 				/* Clean up the mess caused by an aborted
4846 				 * checkpoint load then scan backwards.
4847 				 */
4848 				yaffs_deinit_blocks(dev);
4849 
4850 				yaffs_deinit_tnodes_and_objs(dev);
4851 
4852 				dev->n_erased_blocks = 0;
4853 				dev->n_free_chunks = 0;
4854 				dev->alloc_block = -1;
4855 				dev->alloc_page = -1;
4856 				dev->n_deleted_files = 0;
4857 				dev->n_unlinked_files = 0;
4858 				dev->n_bg_deletions = 0;
4859 
4860 				if (!init_failed && !yaffs_init_blocks(dev))
4861 					init_failed = 1;
4862 
4863 				yaffs_init_tnodes_and_objs(dev);
4864 
4865 				if (!init_failed
4866 				    && !yaffs_create_initial_dir(dev))
4867 					init_failed = 1;
4868 
4869 				if (!init_failed && !yaffs2_scan_backwards(dev))
4870 					init_failed = 1;
4871 			}
4872 		} else if (!yaffs1_scan(dev)) {
4873 			init_failed = 1;
4874 		}
4875 
4876 		yaffs_strip_deleted_objs(dev);
4877 		yaffs_fix_hanging_objs(dev);
4878 		if (dev->param.empty_lost_n_found)
4879 			yaffs_empty_l_n_f(dev);
4880 	}
4881 
4882 	if (init_failed) {
4883 		/* Clean up the mess */
4884 		yaffs_trace(YAFFS_TRACE_TRACING,
4885 		  "yaffs: yaffs_guts_initialise() aborted.");
4886 
4887 		yaffs_deinitialise(dev);
4888 		return YAFFS_FAIL;
4889 	}
4890 
4891 	/* Zero out stats */
4892 	dev->n_page_reads = 0;
4893 	dev->n_page_writes = 0;
4894 	dev->n_erasures = 0;
4895 	dev->n_gc_copies = 0;
4896 	dev->n_retried_writes = 0;
4897 
4898 	dev->n_retired_blocks = 0;
4899 
4900 	yaffs_verify_free_chunks(dev);
4901 	yaffs_verify_blocks(dev);
4902 
4903 	/* Clean up any aborted checkpoint data */
4904 	if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
4905 		yaffs2_checkpt_invalidate(dev);
4906 
4907 	yaffs_trace(YAFFS_TRACE_TRACING,
4908 	  "yaffs: yaffs_guts_initialise() done.");
4909 	return YAFFS_OK;
4910 }
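
/*
 * Illustrative sketch (guarded out, not part of the driver build): the
 * minimal geometry a caller sets up before mounting.  The values shown
 * (2048-byte chunks, 64 chunks per block, blocks 0..1023, 5 reserved blocks)
 * are examples only; the driver and tag hooks discussed above must also be
 * installed first.
 */
#if 0
static int example_mount(struct yaffs_dev *dev)
{
	dev->param.total_bytes_per_chunk = 2048;
	dev->param.chunks_per_block = 64;
	dev->param.start_block = 0;	/* 0 makes yaffs apply an internal offset */
	dev->param.end_block = 1023;
	dev->param.n_reserved_blocks = 5;
	dev->param.is_yaffs2 = 1;
	dev->param.n_caches = 10;

	return yaffs_guts_initialise(dev);	/* YAFFS_OK on success */
}
#endif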
4911 
4912 void yaffs_deinitialise(struct yaffs_dev *dev)
4913 {
4914 	if (dev->is_mounted) {
4915 		int i;
4916 
4917 		yaffs_deinit_blocks(dev);
4918 		yaffs_deinit_tnodes_and_objs(dev);
4919 		yaffs_summary_deinit(dev);
4920 
4921 		if (dev->param.n_caches > 0 && dev->cache) {
4922 
4923 			for (i = 0; i < dev->param.n_caches; i++) {
4924 				kfree(dev->cache[i].data);
4925 				dev->cache[i].data = NULL;
4926 			}
4927 
4928 			kfree(dev->cache);
4929 			dev->cache = NULL;
4930 		}
4931 
4932 		kfree(dev->gc_cleanup_list);
4933 
4934 		for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
4935 			kfree(dev->temp_buffer[i].buffer);
4936 
4937 		dev->is_mounted = 0;
4938 
4939 		yaffs_deinit_nand(dev);
4940 	}
4941 }
4942 
4943 int yaffs_count_free_chunks(struct yaffs_dev *dev)
4944 {
4945 	int n_free = 0;
4946 	int b;
4947 	struct yaffs_block_info *blk;
4948 
4949 	blk = dev->block_info;
4950 	for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
4951 		switch (blk->block_state) {
4952 		case YAFFS_BLOCK_STATE_EMPTY:
4953 		case YAFFS_BLOCK_STATE_ALLOCATING:
4954 		case YAFFS_BLOCK_STATE_COLLECTING:
4955 		case YAFFS_BLOCK_STATE_FULL:
4956 			n_free +=
4957 			    (dev->param.chunks_per_block - blk->pages_in_use +
4958 			     blk->soft_del_pages);
4959 			break;
4960 		default:
4961 			break;
4962 		}
4963 		blk++;
4964 	}
4965 	return n_free;
4966 }
4967 
4968 int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
4969 {
4970 	/* This is what we report to the outside world */
4971 	int n_free;
4972 	int n_dirty_caches;
4973 	int blocks_for_checkpt;
4974 	int i;
4975 
4976 	n_free = dev->n_free_chunks;
4977 	n_free += dev->n_deleted_files;
4978 
4979 	/* Now count and subtract the number of dirty chunks in the cache. */
4980 
4981 	for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
4982 		if (dev->cache[i].dirty)
4983 			n_dirty_caches++;
4984 	}
4985 
4986 	n_free -= n_dirty_caches;
4987 
4988 	n_free -=
4989 	    ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
4990 
4991 	/* Now figure checkpoint space and report that... */
4992 	blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
4993 
4994 	n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
4995 
4996 	if (n_free < 0)
4997 		n_free = 0;
4998 
4999 	return n_free;
5000 }
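
/*
 * Worked example: with 64 chunks per block, 5 reserved blocks, 1000 free
 * chunks on the device, 20 deleted files awaiting soft deletion, 2 dirty
 * cache entries and 2 blocks needed for a checkpoint, the reported figure is
 * 1000 + 20 - 2 - (5 + 1) * 64 - 2 * 64 = 506 chunks.
 */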
5001 
5002 
5003 int yaffs_format_dev(struct yaffs_dev *dev)
5004 {
5005 	int i;
5006 	enum yaffs_block_state state;
5007 	u32 dummy;
5008 
5009 	if (dev->is_mounted)
5010 		return YAFFS_FAIL;
5011 
5012 	/*
5013 	 * The runtime variables might not have been set up,
5014 	 * so set up what we need.
5015 	 */
5016 	dev->internal_start_block = dev->param.start_block;
5017 	dev->internal_end_block = dev->param.end_block;
5018 	dev->block_offset = 0;
5019 	dev->chunk_offset = 0;
5020 
5021 	if (dev->param.start_block == 0) {
5022 		dev->internal_start_block = dev->param.start_block + 1;
5023 		dev->internal_end_block = dev->param.end_block + 1;
5024 		dev->block_offset = 1;
5025 		dev->chunk_offset = dev->param.chunks_per_block;
5026 	}
5027 
5028 	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
5029 		yaffs_query_init_block_state(dev, i, &state, &dummy);
5030 		if (state != YAFFS_BLOCK_STATE_DEAD)
5031 			yaffs_erase_block(dev, i);
5032 	}
5033 
5034 	return YAFFS_OK;
5035 }
5036 
5037 
5038 /*
5039  * Marshalling functions to get loff_t file sizes into and out of
5040  * object headers.
5041  */
5042 void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize)
5043 {
5044 	oh->file_size_low = (fsize & 0xFFFFFFFF);
5045 	oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF);
5046 }
5047 
5048 loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh)
5049 {
5050 	loff_t retval;
5051 
5052 	if (sizeof(loff_t) >= 8 && ~(oh->file_size_high))
5053 		retval = (((loff_t) oh->file_size_high) << 32) |
5054 			(((loff_t) oh->file_size_low) & 0xFFFFFFFF);
5055 	else
5056 		retval = (loff_t) oh->file_size_low;
5057 
5058 	return retval;
5059 }
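
/*
 * Worked example: storing fsize == 0x123456789 sets file_size_low to
 * 0x23456789 and file_size_high to 0x1; yaffs_oh_to_size() reassembles the
 * same value when loff_t is 64 bits.  A file_size_high of 0xFFFFFFFF is
 * treated as absent, in which case only the low word is used.
 */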
5060