/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			 kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
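	/* hdr_crc covers only the common node header (struct
	   jffs2_unknown_node) minus its own 4-byte CRC field */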
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
		from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
		from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);


	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
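	/* node_crc covers the raw inode up to, but not including, the
	   trailing data_crc and node_crc fields (the last 8 bytes);
	   the data CRC is computed separately below */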
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_uid_write(inode, je16_to_cpu(ri->uid));
	i_gid_write(inode, je16_to_cpu(ri->gid));


	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}

int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int rc;

	rc = setattr_prepare(dentry, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(inode, inode->i_mode);

	return rc;
}

int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

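	/* Report usable space as dirty + free, minus the erase blocks
	   kept in reserve for garbage collection (resv_blocks_write) */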
	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}


void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 *  the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
		  __func__, inode->i_ino, inode->i_mode);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	jffs2_do_clear_inode(c, f);
}

struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
	if (ret)
		goto error;

	inode->i_mode = jemode_to_cpu(latest_node.mode);
	i_uid_write(inode, je16_to_cpu(latest_node.uid));
	i_gid_write(inode, je16_to_cpu(latest_node.gid));
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

	set_nlink(inode, f->inocache->pino_nlink);

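	/* i_blocks is counted in 512-byte sectors, hence the round-up and shift by 9 */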
	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		inode->i_link = f->target;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		set_nlink(inode, 2); /* parent and '.' */

		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			pr_notice("Device node has strange size %d\n",
				  f->metadata->size);
			goto error_io;
		}
		jffs2_dbg(1, "Reading device numbers from flash\n");
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			pr_notice("Read device numbers for inode %lu failed\n",
				  (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
		fallthrough;

	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
			__func__, inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	jffs2_dbg(1, "jffs2_read_inode() returning\n");
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	iget_failed(inode);
	return ERR_PTR(ret);
}

void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
			  __func__, inode->i_ino);
		return;
	}

	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
		  __func__, inode->i_ino);

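	/* Write out a new metadata node carrying the inode's current
	   attributes and timestamps */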
	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode->i_atime;
	iattr.ia_mtime = inode->i_mtime;
	iattr.ia_ctime = inode->i_ctime;

	jffs2_do_setattr(inode, &iattr);
}

int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !sb_rdonly(sb))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!sb_rdonly(sb)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(fc->sb_flags & SB_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	fc->sb_flags |= SB_NOATIME;
	return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
		  __func__, dir_i->i_ino, mode);

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));

	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(i_gid_read(dir_i));
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	set_nlink(inode, 1);
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_gid_write(inode, je16_to_cpu(ri->gid));
	i_uid_write(inode, je16_to_cpu(ri->uid));
	inode->i_atime = inode->i_ctime = inode->i_mtime = current_time(inode);
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}

static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick an inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, apply a hashsize twice
	 * that size, but rounding down to a multiple of 64. And keep
	 * to sensible bounds.
	 */

	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;

	return hashsize;
}

int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

	/* MLC NAND is not supported */
	if (c->mtd->type == MTD_MLCNANDFLASH)
		return -EINVAL;

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		errorf(fc, "Cannot operate on NAND flash unless jffs2 NAND support is compiled in");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		errorf(fc, "Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		infof(fc, "Flash size not aligned to erasesize, reducing to %dKiB",
		      c->flash_size / 1024);
	}

	if (c->flash_size < 5*c->sector_size) {
		errorf(fc, "Too few erase blocks (%d)",
		       c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		jffs2_dbg(1, "get root inode failed\n");
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
	sb->s_root = d_make_root(root_i);
	if (!sb->s_root)
		goto out_root;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	sb->s_time_min = 0;
	sb->s_time_max = U32_MAX;

	if (!sb_rdonly(sb))
		jffs2_start_garbage_collect_thread(c);
	return 0;

out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	kvfree(c->blocks);
	jffs2_clear_xattr_subsystem(c);
	jffs2_sum_exit(c);
 out_inohash:
	kfree(c->inocache_list);
 out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}

void jffs2_gc_release_inode(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f)
{
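	/* OFNI_EDONI_2SFFJ() ("JFFS2_INODE_INFO" backwards) maps the
	   jffs2_inode_info back to its containing VFS inode */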
	iput(OFNI_EDONI_2SFFJ(f));
}

struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum);

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
					  inum);
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state);
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
			  inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}

static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and Dataflash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}

void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}