1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright © 2001-2007 Red Hat, Inc.
5  * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
6  *
7  * Created by David Woodhouse <dwmw2@infradead.org>
8  *
9  * For licensing information, see the file 'LICENCE' in this directory.
10  *
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/capability.h>
16 #include <linux/kernel.h>
17 #include <linux/sched.h>
18 #include <linux/fs.h>
19 #include <linux/list.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/pagemap.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/vfs.h>
25 #include <linux/crc32.h>
26 #include "nodelist.h"
27 
28 static int jffs2_flash_setup(struct jffs2_sb_info *c);
29 
30 int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
31 {
32 	struct jffs2_full_dnode *old_metadata, *new_metadata;
33 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
34 	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
35 	struct jffs2_raw_inode *ri;
36 	union jffs2_device_node dev;
37 	unsigned char *mdata = NULL;
38 	int mdatalen = 0;
39 	unsigned int ivalid;
40 	uint32_t alloclen;
41 	int ret;
42 	int alloc_type = ALLOC_NORMAL;
43 
44 	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);
45 
46 	/* Special cases - we don't want more than one data node
47 	   for these types on the medium at any time. So setattr
48 	   must read the original data associated with the node
49 	   (i.e. the device numbers or the target name) and write
50 	   it out again with the appropriate data attached */
51 	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
52 		/* For these, we don't actually need to read the old node */
53 		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
54 		mdata = (char *)&dev;
55 		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
56 			  __func__, mdatalen);
57 	} else if (S_ISLNK(inode->i_mode)) {
58 		mutex_lock(&f->sem);
59 		mdatalen = f->metadata->size;
60 		mdata = kmalloc(f->metadata->size, GFP_USER);
61 		if (!mdata) {
62 			mutex_unlock(&f->sem);
63 			return -ENOMEM;
64 		}
65 		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
66 		if (ret) {
67 			mutex_unlock(&f->sem);
68 			kfree(mdata);
69 			return ret;
70 		}
71 		mutex_unlock(&f->sem);
72 		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
73 			  __func__, mdatalen);
74 	}
75 
76 	ri = jffs2_alloc_raw_inode();
77 	if (!ri) {
78 		if (S_ISLNK(inode->i_mode))
79 			kfree(mdata);
80 		return -ENOMEM;
81 	}
82 
83 	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
84 				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
85 	if (ret) {
86 		jffs2_free_raw_inode(ri);
87 		if (S_ISLNK(inode->i_mode))
88 			 kfree(mdata);
89 		return ret;
90 	}
91 	mutex_lock(&f->sem);
92 	ivalid = iattr->ia_valid;
93 
94 	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
95 	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
96 	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
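	/* hdr_crc covers only the common node header, i.e. sizeof(struct jffs2_unknown_node) minus the 4-byte CRC field itself */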
97 	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
98 
99 	ri->ino = cpu_to_je32(inode->i_ino);
100 	ri->version = cpu_to_je32(++f->highest_version);
101 
102 	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
103 		from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
104 	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
105 		from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));
106 
107 	if (ivalid & ATTR_MODE)
108 		ri->mode = cpu_to_jemode(iattr->ia_mode);
109 	else
110 		ri->mode = cpu_to_jemode(inode->i_mode);
111 
112 
113 	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
114 	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
115 	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
116 	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));
117 
118 	ri->offset = cpu_to_je32(0);
119 	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
120 	ri->compr = JFFS2_COMPR_NONE;
121 	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
122 		/* It's an extension. Make it a hole node */
123 		ri->compr = JFFS2_COMPR_ZERO;
124 		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
125 		ri->offset = cpu_to_je32(inode->i_size);
126 	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
127 		/* For truncate-to-zero, treat it as deletion because
128 		   it'll always be obsoleting all previous nodes */
129 		alloc_type = ALLOC_DELETION;
130 	}
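	/* node_crc covers the raw inode up to, but not including, the trailing data_crc and node_crc words (hence sizeof(*ri)-8) */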
131 	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
132 	if (mdatalen)
133 		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
134 	else
135 		ri->data_crc = cpu_to_je32(0);
136 
137 	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
138 	if (S_ISLNK(inode->i_mode))
139 		kfree(mdata);
140 
141 	if (IS_ERR(new_metadata)) {
142 		jffs2_complete_reservation(c);
143 		jffs2_free_raw_inode(ri);
144 		mutex_unlock(&f->sem);
145 		return PTR_ERR(new_metadata);
146 	}
147 	/* It worked. Update the inode */
148 	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
149 	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
150 	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
151 	inode->i_mode = jemode_to_cpu(ri->mode);
152 	i_uid_write(inode, je16_to_cpu(ri->uid));
153 	i_gid_write(inode, je16_to_cpu(ri->gid));
154 
155 
156 	old_metadata = f->metadata;
157 
158 	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
159 		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);
160 
161 	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
162 		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
163 		inode->i_size = iattr->ia_size;
164 		inode->i_blocks = (inode->i_size + 511) >> 9;
165 		f->metadata = NULL;
166 	} else {
167 		f->metadata = new_metadata;
168 	}
169 	if (old_metadata) {
170 		jffs2_mark_node_obsolete(c, old_metadata->raw);
171 		jffs2_free_full_dnode(old_metadata);
172 	}
173 	jffs2_free_raw_inode(ri);
174 
175 	mutex_unlock(&f->sem);
176 	jffs2_complete_reservation(c);
177 
178 	/* We have to do the truncate_setsize() without f->sem held, since
179 	   some pages may be locked and waiting for it in readpage().
180 	   We are protected from a simultaneous write() extending i_size
181 	   back past iattr->ia_size, because do_truncate() holds the
182 	   generic inode semaphore. */
183 	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
184 		truncate_setsize(inode, iattr->ia_size);
185 		inode->i_blocks = (inode->i_size + 511) >> 9;
186 	}
187 
188 	return 0;
189 }
190 
191 int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
192 {
193 	struct inode *inode = d_inode(dentry);
194 	int rc;
195 
196 	rc = inode_change_ok(inode, iattr);
197 	if (rc)
198 		return rc;
199 
200 	rc = jffs2_do_setattr(inode, iattr);
201 	if (!rc && (iattr->ia_valid & ATTR_MODE))
202 		rc = posix_acl_chmod(inode, inode->i_mode);
203 
204 	return rc;
205 }
206 
207 int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
208 {
209 	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
210 	unsigned long avail;
211 
212 	buf->f_type = JFFS2_SUPER_MAGIC;
213 	buf->f_bsize = 1 << PAGE_SHIFT;
214 	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
215 	buf->f_files = 0;
216 	buf->f_ffree = 0;
217 	buf->f_namelen = JFFS2_MAX_NAME_LEN;
218 	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
219 	buf->f_fsid.val[1] = c->mtd->index;
220 
221 	spin_lock(&c->erase_completion_lock);
222 	avail = c->dirty_size + c->free_size;
223 	if (avail > c->sector_size * c->resv_blocks_write)
224 		avail -= c->sector_size * c->resv_blocks_write;
225 	else
226 		avail = 0;
227 	spin_unlock(&c->erase_completion_lock);
228 
229 	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
230 
231 	return 0;
232 }
233 
234 
235 void jffs2_evict_inode (struct inode *inode)
236 {
237 	/* We can forget about this inode for now - drop all
238 	 *  the nodelists associated with it, etc.
239 	 */
240 	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
241 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
242 
243 	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
244 		  __func__, inode->i_ino, inode->i_mode);
245 	truncate_inode_pages_final(&inode->i_data);
246 	clear_inode(inode);
247 	jffs2_do_clear_inode(c, f);
248 }
249 
250 struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
251 {
252 	struct jffs2_inode_info *f;
253 	struct jffs2_sb_info *c;
254 	struct jffs2_raw_inode latest_node;
255 	union jffs2_device_node jdev;
256 	struct inode *inode;
257 	dev_t rdev = 0;
258 	int ret;
259 
260 	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);
261 
262 	inode = iget_locked(sb, ino);
263 	if (!inode)
264 		return ERR_PTR(-ENOMEM);
265 	if (!(inode->i_state & I_NEW))
266 		return inode;
267 
268 	f = JFFS2_INODE_INFO(inode);
269 	c = JFFS2_SB_INFO(inode->i_sb);
270 
271 	jffs2_init_inode_info(f);
272 	mutex_lock(&f->sem);
273 
274 	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
275 	if (ret)
276 		goto error;
277 
278 	inode->i_mode = jemode_to_cpu(latest_node.mode);
279 	i_uid_write(inode, je16_to_cpu(latest_node.uid));
280 	i_gid_write(inode, je16_to_cpu(latest_node.gid));
281 	inode->i_size = je32_to_cpu(latest_node.isize);
282 	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
283 	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
284 	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
285 
286 	set_nlink(inode, f->inocache->pino_nlink);
287 
288 	inode->i_blocks = (inode->i_size + 511) >> 9;
289 
290 	switch (inode->i_mode & S_IFMT) {
291 
292 	case S_IFLNK:
293 		inode->i_op = &jffs2_symlink_inode_operations;
294 		inode->i_link = f->target;
295 		break;
296 
297 	case S_IFDIR:
298 	{
299 		struct jffs2_full_dirent *fd;
300 		set_nlink(inode, 2); /* parent and '.' */
301 
302 		for (fd=f->dents; fd; fd = fd->next) {
303 			if (fd->type == DT_DIR && fd->ino)
304 				inc_nlink(inode);
305 		}
306 		/* Root dir gets i_nlink 3 for some reason */
307 		if (inode->i_ino == 1)
308 			inc_nlink(inode);
309 
310 		inode->i_op = &jffs2_dir_inode_operations;
311 		inode->i_fop = &jffs2_dir_operations;
312 		break;
313 	}
314 	case S_IFREG:
315 		inode->i_op = &jffs2_file_inode_operations;
316 		inode->i_fop = &jffs2_file_operations;
317 		inode->i_mapping->a_ops = &jffs2_file_address_operations;
318 		inode->i_mapping->nrpages = 0;
319 		break;
320 
321 	case S_IFBLK:
322 	case S_IFCHR:
323 		/* Read the device numbers from the media */
324 		if (f->metadata->size != sizeof(jdev.old_id) &&
325 		    f->metadata->size != sizeof(jdev.new_id)) {
326 			pr_notice("Device node has strange size %d\n",
327 				  f->metadata->size);
328 			goto error_io;
329 		}
330 		jffs2_dbg(1, "Reading device numbers from flash\n");
331 		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
332 		if (ret < 0) {
333 			/* Eep */
334 			pr_notice("Read device numbers for inode %lu failed\n",
335 				  (unsigned long)inode->i_ino);
336 			goto error;
337 		}
338 		if (f->metadata->size == sizeof(jdev.old_id))
339 			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
340 		else
341 			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
342 
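		/* fall through: init_special_inode() below uses the rdev decoded above */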
343 	case S_IFSOCK:
344 	case S_IFIFO:
345 		inode->i_op = &jffs2_file_inode_operations;
346 		init_special_inode(inode, inode->i_mode, rdev);
347 		break;
348 
349 	default:
350 		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
351 			__func__, inode->i_mode, (unsigned long)inode->i_ino);
352 	}
353 
354 	mutex_unlock(&f->sem);
355 
356 	jffs2_dbg(1, "jffs2_read_inode() returning\n");
357 	unlock_new_inode(inode);
358 	return inode;
359 
360 error_io:
361 	ret = -EIO;
362 error:
363 	mutex_unlock(&f->sem);
364 	iget_failed(inode);
365 	return ERR_PTR(ret);
366 }
367 
368 void jffs2_dirty_inode(struct inode *inode, int flags)
369 {
370 	struct iattr iattr;
371 
372 	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
373 		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
374 			  __func__, inode->i_ino);
375 		return;
376 	}
377 
378 	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
379 		  __func__, inode->i_ino);
380 
381 	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
382 	iattr.ia_mode = inode->i_mode;
383 	iattr.ia_uid = inode->i_uid;
384 	iattr.ia_gid = inode->i_gid;
385 	iattr.ia_atime = inode->i_atime;
386 	iattr.ia_mtime = inode->i_mtime;
387 	iattr.ia_ctime = inode->i_ctime;
388 
389 	jffs2_do_setattr(inode, &iattr);
390 }
391 
392 int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
393 {
394 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
395 
396 	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
397 		return -EROFS;
398 
399 	/* We stop if it was running, then restart if it needs to.
400 	   This also catches the case where it was stopped and this
401 	   is just a remount to restart it.
402 	   Flush the writebuffer, if necessary, else we lose it */
403 	if (!(sb->s_flags & MS_RDONLY)) {
404 		jffs2_stop_garbage_collect_thread(c);
405 		mutex_lock(&c->alloc_sem);
406 		jffs2_flush_wbuf_pad(c);
407 		mutex_unlock(&c->alloc_sem);
408 	}
409 
410 	if (!(*flags & MS_RDONLY))
411 		jffs2_start_garbage_collect_thread(c);
412 
413 	*flags |= MS_NOATIME;
414 	return 0;
415 }
416 
417 /* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
418    fill in the raw_inode while you're at it. */
419 struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
420 {
421 	struct inode *inode;
422 	struct super_block *sb = dir_i->i_sb;
423 	struct jffs2_sb_info *c;
424 	struct jffs2_inode_info *f;
425 	int ret;
426 
427 	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
428 		  __func__, dir_i->i_ino, mode);
429 
430 	c = JFFS2_SB_INFO(sb);
431 
432 	inode = new_inode(sb);
433 
434 	if (!inode)
435 		return ERR_PTR(-ENOMEM);
436 
437 	f = JFFS2_INODE_INFO(inode);
438 	jffs2_init_inode_info(f);
439 	mutex_lock(&f->sem);
440 
441 	memset(ri, 0, sizeof(*ri));
442 	/* Set OS-specific defaults for new inodes */
443 	ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));
444 
445 	if (dir_i->i_mode & S_ISGID) {
446 		ri->gid = cpu_to_je16(i_gid_read(dir_i));
447 		if (S_ISDIR(mode))
448 			mode |= S_ISGID;
449 	} else {
450 		ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
451 	}
452 
453 	/* POSIX ACLs have to be processed now, at least partly.
454 	   The umask is only applied if there's no default ACL */
455 	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
456 	if (ret) {
457 		mutex_unlock(&f->sem);
458 		make_bad_inode(inode);
459 		iput(inode);
460 		return ERR_PTR(ret);
461 	}
462 	ret = jffs2_do_new_inode (c, f, mode, ri);
463 	if (ret) {
464 		mutex_unlock(&f->sem);
465 		make_bad_inode(inode);
466 		iput(inode);
467 		return ERR_PTR(ret);
468 	}
469 	set_nlink(inode, 1);
470 	inode->i_ino = je32_to_cpu(ri->ino);
471 	inode->i_mode = jemode_to_cpu(ri->mode);
472 	i_gid_write(inode, je16_to_cpu(ri->gid));
473 	i_uid_write(inode, je16_to_cpu(ri->uid));
474 	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
475 	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
476 
477 	inode->i_blocks = 0;
478 	inode->i_size = 0;
479 
480 	if (insert_inode_locked(inode) < 0) {
481 		mutex_unlock(&f->sem);
482 		make_bad_inode(inode);
483 		iput(inode);
484 		return ERR_PTR(-EINVAL);
485 	}
486 
487 	return inode;
488 }
489 
490 static int calculate_inocache_hashsize(uint32_t flash_size)
491 {
492 	/*
493 	 * Pick an inocache hash size based on the size of the medium.
494 	 * Count how many megabytes we're dealing with, apply a hashsize twice
495 	 * that size, but rounding down to the usual big powers of 2. And keep
496 	 * to sensible bounds.
497 	 */
498 
499 	int size_mb = flash_size / 1024 / 1024;
500 	int hashsize = (size_mb * 2) & ~0x3f;
501 
502 	if (hashsize < INOCACHE_HASHSIZE_MIN)
503 		return INOCACHE_HASHSIZE_MIN;
504 	if (hashsize > INOCACHE_HASHSIZE_MAX)
505 		return INOCACHE_HASHSIZE_MAX;
506 
507 	return hashsize;
508 }
509 
510 int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
511 {
512 	struct jffs2_sb_info *c;
513 	struct inode *root_i;
514 	int ret;
515 	size_t blocks;
516 
517 	c = JFFS2_SB_INFO(sb);
518 
519 	/* Do not support MLC NAND */
520 	if (c->mtd->type == MTD_MLCNANDFLASH)
521 		return -EINVAL;
522 
523 #ifndef CONFIG_JFFS2_FS_WRITEBUFFER
524 	if (c->mtd->type == MTD_NANDFLASH) {
525 		pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
526 		return -EINVAL;
527 	}
528 	if (c->mtd->type == MTD_DATAFLASH) {
529 		pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
530 		return -EINVAL;
531 	}
532 #endif
533 
534 	c->flash_size = c->mtd->size;
535 	c->sector_size = c->mtd->erasesize;
536 	blocks = c->flash_size / c->sector_size;
537 
538 	/*
539 	 * Size alignment check
540 	 */
541 	if ((c->sector_size * blocks) != c->flash_size) {
542 		c->flash_size = c->sector_size * blocks;
543 		pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
544 			c->flash_size / 1024);
545 	}
546 
547 	if (c->flash_size < 5*c->sector_size) {
548 		pr_err("Too few erase blocks (%d)\n",
549 		       c->flash_size / c->sector_size);
550 		return -EINVAL;
551 	}
552 
553 	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
554 
555 	/* NAND (or other bizarre) flash... do setup accordingly */
556 	ret = jffs2_flash_setup(c);
557 	if (ret)
558 		return ret;
559 
560 	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
561 	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
562 	if (!c->inocache_list) {
563 		ret = -ENOMEM;
564 		goto out_wbuf;
565 	}
566 
567 	jffs2_init_xattr_subsystem(c);
568 
569 	if ((ret = jffs2_do_mount_fs(c)))
570 		goto out_inohash;
571 
572 	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
573 	root_i = jffs2_iget(sb, 1);
574 	if (IS_ERR(root_i)) {
575 		jffs2_dbg(1, "get root inode failed\n");
576 		ret = PTR_ERR(root_i);
577 		goto out_root;
578 	}
579 
580 	ret = -ENOMEM;
581 
582 	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
583 	sb->s_root = d_make_root(root_i);
584 	if (!sb->s_root)
585 		goto out_root;
586 
587 	sb->s_maxbytes = 0xFFFFFFFF;
588 	sb->s_blocksize = PAGE_CACHE_SIZE;
589 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
590 	sb->s_magic = JFFS2_SUPER_MAGIC;
591 	if (!(sb->s_flags & MS_RDONLY))
592 		jffs2_start_garbage_collect_thread(c);
593 	return 0;
594 
595 out_root:
596 	jffs2_free_ino_caches(c);
597 	jffs2_free_raw_node_refs(c);
598 	if (jffs2_blocks_use_vmalloc(c))
599 		vfree(c->blocks);
600 	else
601 		kfree(c->blocks);
602  out_inohash:
603 	jffs2_clear_xattr_subsystem(c);
604 	kfree(c->inocache_list);
605  out_wbuf:
606 	jffs2_flash_cleanup(c);
607 
608 	return ret;
609 }
610 
611 void jffs2_gc_release_inode(struct jffs2_sb_info *c,
612 				   struct jffs2_inode_info *f)
613 {
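	/* OFNI_EDONI_2SFFJ() is the inverse of JFFS2_INODE_INFO(): it returns the VFS inode that embeds this jffs2_inode_info */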
614 	iput(OFNI_EDONI_2SFFJ(f));
615 }
616 
617 struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
618 					      int inum, int unlinked)
619 {
620 	struct inode *inode;
621 	struct jffs2_inode_cache *ic;
622 
623 	if (unlinked) {
624 		/* The inode has zero nlink but its nodes weren't yet marked
625 		   obsolete. This has to be because we're still waiting for
626 		   the final (close() and) iput() to happen.
627 
628 		   There's a possibility that the final iput() could have
629 		   happened while we were contemplating. In order to ensure
630 		   that we don't cause a new read_inode() (which would fail)
631 		   for the inode in question, we use ilookup() in this case
632 		   instead of iget().
633 
634 		   The nlink can't _become_ zero at this point because we're
635 		   holding the alloc_sem, and jffs2_do_unlink() would also
636 		   need that while decrementing nlink on any inode.
637 		*/
638 		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
639 		if (!inode) {
640 			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
641 				  inum);
642 
643 			spin_lock(&c->inocache_lock);
644 			ic = jffs2_get_ino_cache(c, inum);
645 			if (!ic) {
646 				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
647 					  inum);
648 				spin_unlock(&c->inocache_lock);
649 				return NULL;
650 			}
651 			if (ic->state != INO_STATE_CHECKEDABSENT) {
652 				/* Wait for progress. Don't just loop */
653 				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
654 					  ic->ino, ic->state);
655 				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
656 			} else {
657 				spin_unlock(&c->inocache_lock);
658 			}
659 
660 			return NULL;
661 		}
662 	} else {
663 		/* Inode has links to it still; they're not going away because
664 		   jffs2_do_unlink() would need the alloc_sem and we have it.
665 		   Just iget() it, and if read_inode() is necessary that's OK.
666 		*/
667 		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
668 		if (IS_ERR(inode))
669 			return ERR_CAST(inode);
670 	}
671 	if (is_bad_inode(inode)) {
672 		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
673 			  inum, unlinked);
674 		/* NB. This will happen again. We need to do something appropriate here. */
675 		iput(inode);
676 		return ERR_PTR(-EIO);
677 	}
678 
679 	return JFFS2_INODE_INFO(inode);
680 }
681 
682 unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
683 				   struct jffs2_inode_info *f,
684 				   unsigned long offset,
685 				   unsigned long *priv)
686 {
687 	struct inode *inode = OFNI_EDONI_2SFFJ(f);
688 	struct page *pg;
689 
690 	pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
691 			     (void *)jffs2_do_readpage_unlock, inode);
692 	if (IS_ERR(pg))
693 		return (void *)pg;
694 
695 	*priv = (unsigned long)pg;
696 	return kmap(pg);
697 }
698 
699 void jffs2_gc_release_page(struct jffs2_sb_info *c,
700 			   unsigned char *ptr,
701 			   unsigned long *priv)
702 {
703 	struct page *pg = (void *)*priv;
704 
705 	kunmap(pg);
706 	page_cache_release(pg);
707 }
708 
709 static int jffs2_flash_setup(struct jffs2_sb_info *c) {
710 	int ret = 0;
711 
712 	if (jffs2_cleanmarker_oob(c)) {
713 		/* NAND flash... do setup accordingly */
714 		ret = jffs2_nand_flash_setup(c);
715 		if (ret)
716 			return ret;
717 	}
718 
719 	/* and Dataflash */
720 	if (jffs2_dataflash(c)) {
721 		ret = jffs2_dataflash_setup(c);
722 		if (ret)
723 			return ret;
724 	}
725 
726 	/* and Intel "Sibley" flash */
727 	if (jffs2_nor_wbuf_flash(c)) {
728 		ret = jffs2_nor_wbuf_flash_setup(c);
729 		if (ret)
730 			return ret;
731 	}
732 
733 	/* and an UBI volume */
734 	if (jffs2_ubivol(c)) {
735 		ret = jffs2_ubivol_setup(c);
736 		if (ret)
737 			return ret;
738 	}
739 
740 	return ret;
741 }
742 
743 void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
744 
745 	if (jffs2_cleanmarker_oob(c)) {
746 		jffs2_nand_flash_cleanup(c);
747 	}
748 
749 	/* and DataFlash */
750 	if (jffs2_dataflash(c)) {
751 		jffs2_dataflash_cleanup(c);
752 	}
753 
754 	/* and Intel "Sibley" flash */
755 	if (jffs2_nor_wbuf_flash(c)) {
756 		jffs2_nor_wbuf_flash_cleanup(c);
757 	}
758 
759 	/* and an UBI volume */
760 	if (jffs2_ubivol(c)) {
761 		jffs2_ubivol_cleanup(c);
762 	}
763 }
764