1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) International Business Machines Corp., 2000-2004
4 */
5
6 /*
7 * jfs_imap.c: inode allocation map manager
8 *
9 * Serialization:
10 * Each AG has a simple lock which is used to control the serialization of
11 * the AG level lists. This lock should be taken first whenever an AG
12 * level list will be modified or accessed.
13 *
14 * Each IAG is locked by obtaining the buffer for the IAG page.
15 *
16 * There is also an inode lock for the inode map inode. A read lock needs to
17 * be taken whenever an IAG is read from the map or the global level
18 * information is read. A write lock needs to be taken whenever the global
19 * level information is modified or an atomic operation needs to be used.
20 *
21 * If more than one IAG is read at one time, the read lock may not
22 * be given up until all of the IAGs are read. Otherwise, a deadlock
23 * may occur when trying to obtain the read lock while another thread
24 * holding the read lock is waiting on the IAG already being held.
25 *
26 * The control page of the inode map is read into memory by diMount().
27 * Thereafter it should only be modified in memory and then it will be
28 * written out when the filesystem is unmounted by diUnmount().
29 */
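/*
 * (Informal summary, as observed in diAlloc()/diFree(): the AG lock is
 * taken first, the inode map inode's read lock is held across the
 * diIAGRead() calls, and the iag free list lock guards the im_freeiag
 * list.)
 */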
30
31 #include <linux/fs.h>
32 #include <linux/buffer_head.h>
33 #include <linux/pagemap.h>
34 #include <linux/quotaops.h>
35 #include <linux/slab.h>
36
37 #include "jfs_incore.h"
38 #include "jfs_inode.h"
39 #include "jfs_filsys.h"
40 #include "jfs_dinode.h"
41 #include "jfs_dmap.h"
42 #include "jfs_imap.h"
43 #include "jfs_metapage.h"
44 #include "jfs_superblock.h"
45 #include "jfs_debug.h"
46
47 /*
48 * imap locks
49 */
50 /* iag free list lock */
51 #define IAGFREE_LOCK_INIT(imap) mutex_init(&imap->im_freelock)
52 #define IAGFREE_LOCK(imap) mutex_lock(&imap->im_freelock)
53 #define IAGFREE_UNLOCK(imap) mutex_unlock(&imap->im_freelock)
54
55 /* per ag iag list locks */
56 #define AG_LOCK_INIT(imap,index) mutex_init(&(imap->im_aglock[index]))
57 #define AG_LOCK(imap,agno) mutex_lock(&imap->im_aglock[agno])
58 #define AG_UNLOCK(imap,agno) mutex_unlock(&imap->im_aglock[agno])
59
60 /*
61 * forward references
62 */
63 static int diAllocAG(struct inomap *, int, bool, struct inode *);
64 static int diAllocAny(struct inomap *, int, bool, struct inode *);
65 static int diAllocBit(struct inomap *, struct iag *, int);
66 static int diAllocExt(struct inomap *, int, struct inode *);
67 static int diAllocIno(struct inomap *, int, struct inode *);
68 static int diFindFree(u32, int);
69 static int diNewExt(struct inomap *, struct iag *, int);
70 static int diNewIAG(struct inomap *, int *, int, struct metapage **);
71 static void duplicateIXtree(struct super_block *, s64, int, s64 *);
72
73 static int diIAGRead(struct inomap * imap, int, struct metapage **);
74 static int copy_from_dinode(struct dinode *, struct inode *);
75 static void copy_to_dinode(struct dinode *, struct inode *);
76
77 /*
78 * NAME: diMount()
79 *
80 * FUNCTION: initialize the incore inode map control structures for
81 * a fileset or aggregate at init time.
82 *
83 * the inode map's control structure (dinomap) is
84 * brought in from disk and placed in virtual memory.
85 *
86 * PARAMETERS:
87 * ipimap - pointer to inode map inode for the aggregate or fileset.
88 *
89 * RETURN VALUES:
90 * 0 - success
91 * -ENOMEM - insufficient free virtual memory.
92 * -EIO - i/o error.
93 */
94 int diMount(struct inode *ipimap)
95 {
96 struct inomap *imap;
97 struct metapage *mp;
98 int index;
99 struct dinomap_disk *dinom_le;
100
101 /*
102 * allocate/initialize the in-memory inode map control structure
103 */
104 /* allocate the in-memory inode map control structure. */
105 imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
106 if (imap == NULL)
107 return -ENOMEM;
108
109 /* read the on-disk inode map control structure. */
110
111 mp = read_metapage(ipimap,
112 IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
113 PSIZE, 0);
114 if (mp == NULL) {
115 kfree(imap);
116 return -EIO;
117 }
118
119 /* copy the on-disk version to the in-memory version. */
120 dinom_le = (struct dinomap_disk *) mp->data;
121 imap->im_freeiag = le32_to_cpu(dinom_le->in_freeiag);
122 imap->im_nextiag = le32_to_cpu(dinom_le->in_nextiag);
123 atomic_set(&imap->im_numinos, le32_to_cpu(dinom_le->in_numinos));
124 atomic_set(&imap->im_numfree, le32_to_cpu(dinom_le->in_numfree));
125 imap->im_nbperiext = le32_to_cpu(dinom_le->in_nbperiext);
126 imap->im_l2nbperiext = le32_to_cpu(dinom_le->in_l2nbperiext);
127 for (index = 0; index < MAXAG; index++) {
128 imap->im_agctl[index].inofree =
129 le32_to_cpu(dinom_le->in_agctl[index].inofree);
130 imap->im_agctl[index].extfree =
131 le32_to_cpu(dinom_le->in_agctl[index].extfree);
132 imap->im_agctl[index].numinos =
133 le32_to_cpu(dinom_le->in_agctl[index].numinos);
134 imap->im_agctl[index].numfree =
135 le32_to_cpu(dinom_le->in_agctl[index].numfree);
136 }
137
138 /* release the buffer. */
139 release_metapage(mp);
140
141 /*
142 * allocate/initialize inode allocation map locks
143 */
144 /* allocate and init iag free list lock */
145 IAGFREE_LOCK_INIT(imap);
146
147 /* allocate and init ag list locks */
148 for (index = 0; index < MAXAG; index++) {
149 AG_LOCK_INIT(imap, index);
150 }
151
152 /* bind the inode map inode and inode map control structure
153 * to each other.
154 */
155 imap->im_ipimap = ipimap;
156 JFS_IP(ipimap)->i_imap = imap;
157
158 return (0);
159 }
160
161
162 /*
163 * NAME: diUnmount()
164 *
165 * FUNCTION: write to disk the incore inode map control structures for
166 * a fileset or aggregate at unmount time.
167 *
168 * PARAMETERS:
169 * ipimap - pointer to inode map inode for the aggregate or fileset.
170 *
171 * RETURN VALUES:
172 * 0 - success
173 * -ENOMEM - insufficient free virtual memory.
174 * -EIO - i/o error.
175 */
176 int diUnmount(struct inode *ipimap, int mounterror)
177 {
178 struct inomap *imap = JFS_IP(ipimap)->i_imap;
179
180 /*
181 * update the on-disk inode map control structure
182 */
183
184 if (!(mounterror || isReadOnly(ipimap)))
185 diSync(ipimap);
186
187 /*
188 * Invalidate the page cache buffers
189 */
190 truncate_inode_pages(ipimap->i_mapping, 0);
191
192 /*
193 * free in-memory control structure
194 */
195 kfree(imap);
196 JFS_IP(ipimap)->i_imap = NULL;
197
198 return (0);
199 }
200
201
202 /*
203 * diSync()
204 */
205 int diSync(struct inode *ipimap)
206 {
207 struct dinomap_disk *dinom_le;
208 struct inomap *imp = JFS_IP(ipimap)->i_imap;
209 struct metapage *mp;
210 int index;
211
212 /*
213 * write imap global control page
214 */
215 /* read the on-disk inode map control structure */
216 mp = get_metapage(ipimap,
217 IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
218 PSIZE, 0);
219 if (mp == NULL) {
220 jfs_err("diSync: get_metapage failed!");
221 return -EIO;
222 }
223
224 /* copy the in-memory version to the on-disk version */
225 dinom_le = (struct dinomap_disk *) mp->data;
226 dinom_le->in_freeiag = cpu_to_le32(imp->im_freeiag);
227 dinom_le->in_nextiag = cpu_to_le32(imp->im_nextiag);
228 dinom_le->in_numinos = cpu_to_le32(atomic_read(&imp->im_numinos));
229 dinom_le->in_numfree = cpu_to_le32(atomic_read(&imp->im_numfree));
230 dinom_le->in_nbperiext = cpu_to_le32(imp->im_nbperiext);
231 dinom_le->in_l2nbperiext = cpu_to_le32(imp->im_l2nbperiext);
232 for (index = 0; index < MAXAG; index++) {
233 dinom_le->in_agctl[index].inofree =
234 cpu_to_le32(imp->im_agctl[index].inofree);
235 dinom_le->in_agctl[index].extfree =
236 cpu_to_le32(imp->im_agctl[index].extfree);
237 dinom_le->in_agctl[index].numinos =
238 cpu_to_le32(imp->im_agctl[index].numinos);
239 dinom_le->in_agctl[index].numfree =
240 cpu_to_le32(imp->im_agctl[index].numfree);
241 }
242
243 /* write out the control structure */
244 write_metapage(mp);
245
246 /*
247 * write out dirty pages of imap
248 */
249 filemap_write_and_wait(ipimap->i_mapping);
250
251 diWriteSpecial(ipimap, 0);
252
253 return (0);
254 }
255
256
257 /*
258 * NAME: diRead()
259 *
260 * FUNCTION: initialize an incore inode from disk.
261 *
262 * on entry, the specified incore inode should itself
263 * specify the disk inode number corresponding to the
264 * incore inode (i.e. i_number should be initialized).
265 *
266 * this routine handles incore inode initialization for
267 * both "special" and "regular" inodes. special inodes
268 * are those required early in the mount process and
269 * require special handling since much of the file system
270 * is not yet initialized. these "special" inodes are
271 * identified by a NULL inode map inode pointer and are
272 * actually initialized by a call to diReadSpecial().
273 *
274 * for regular inodes, the iag describing the disk inode
275 * is read from disk to determine the inode extent address
276 * for the disk inode. with the inode extent address in
277 * hand, the page of the extent that contains the disk
278 * inode is read and the disk inode is copied to the
279 * incore inode.
280 *
281 * PARAMETERS:
282 * ip - pointer to incore inode to be initialized from disk.
283 *
284 * RETURN VALUES:
285 * 0 - success
286 * -EIO - i/o error.
287 * -ENOMEM - insufficient memory
288 *
289 */
290 int diRead(struct inode *ip)
291 {
292 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
293 int iagno, ino, extno, rc;
294 struct inode *ipimap;
295 struct dinode *dp;
296 struct iag *iagp;
297 struct metapage *mp;
298 s64 blkno, agstart;
299 struct inomap *imap;
300 int block_offset;
301 int inodes_left;
302 unsigned long pageno;
303 int rel_inode;
304
305 jfs_info("diRead: ino = %ld", ip->i_ino);
306
307 ipimap = sbi->ipimap;
308 JFS_IP(ip)->ipimap = ipimap;
309
310 /* determine the iag number for this inode (number) */
311 iagno = INOTOIAG(ip->i_ino);
312
313 /* read the iag */
314 imap = JFS_IP(ipimap)->i_imap;
315 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
316 rc = diIAGRead(imap, iagno, &mp);
317 IREAD_UNLOCK(ipimap);
318 if (rc) {
319 jfs_err("diRead: diIAGRead returned %d", rc);
320 return (rc);
321 }
322
323 iagp = (struct iag *) mp->data;
324
325 /* determine inode extent that holds the disk inode */
326 ino = ip->i_ino & (INOSPERIAG - 1);
327 extno = ino >> L2INOSPEREXT;
328
329 if ((lengthPXD(&iagp->inoext[extno]) != imap->im_nbperiext) ||
330 (addressPXD(&iagp->inoext[extno]) == 0)) {
331 release_metapage(mp);
332 return -ESTALE;
333 }
334
335 /* get disk block number of the page within the inode extent
336 * that holds the disk inode.
337 */
338 blkno = INOPBLK(&iagp->inoext[extno], ino, sbi->l2nbperpage);
339
340 /* get the ag for the iag */
341 agstart = le64_to_cpu(iagp->agstart);
342
343 release_metapage(mp);
344
345 rel_inode = (ino & (INOSPERPAGE - 1));
346 pageno = blkno >> sbi->l2nbperpage;
347
348 if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
349 /*
350 * OS/2 didn't always align inode extents on page boundaries
351 */
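/* (the extent may start part-way into a 4K metapage: fold the leading
 * block offset into rel_inode, or step to the next page when the
 * inode lies beyond this page's remaining blocks.)
 */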
352 inodes_left =
353 (sbi->nbperpage - block_offset) << sbi->l2niperblk;
354
355 if (rel_inode < inodes_left)
356 rel_inode += block_offset << sbi->l2niperblk;
357 else {
358 pageno += 1;
359 rel_inode -= inodes_left;
360 }
361 }
362
363 /* read the page of disk inode */
364 mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
365 if (!mp) {
366 jfs_err("diRead: read_metapage failed");
367 return -EIO;
368 }
369
370 /* locate the disk inode requested */
371 dp = (struct dinode *) mp->data;
372 dp += rel_inode;
373
374 if (ip->i_ino != le32_to_cpu(dp->di_number)) {
375 jfs_error(ip->i_sb, "i_ino != di_number\n");
376 rc = -EIO;
377 } else if (le32_to_cpu(dp->di_nlink) == 0)
378 rc = -ESTALE;
379 else
380 /* copy the disk inode to the in-memory inode */
381 rc = copy_from_dinode(dp, ip);
382
383 release_metapage(mp);
384
385 /* set the ag for the inode */
386 JFS_IP(ip)->agstart = agstart;
387 JFS_IP(ip)->active_ag = -1;
388
389 return (rc);
390 }
391
392
393 /*
394 * NAME: diReadSpecial()
395 *
396 * FUNCTION: initialize a 'special' inode from disk.
397 *
398 * this routine handles aggregate level inodes. The
399 * inode cache cannot differentiate between the
400 * aggregate inodes and the filesystem inodes, so we
401 * handle these here. We don't actually use the aggregate
402 * inode map, since these inodes are at a fixed location
403 * and in some cases the aggregate inode map isn't initialized
404 * yet.
405 *
406 * PARAMETERS:
407 * sb - filesystem superblock
408 * inum - aggregate inode number
409 * secondary - 1 if secondary aggregate inode table
410 *
411 * RETURN VALUES:
412 * new inode - success
413 * NULL - i/o error.
414 */
415 struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
416 {
417 struct jfs_sb_info *sbi = JFS_SBI(sb);
418 uint address;
419 struct dinode *dp;
420 struct inode *ip;
421 struct metapage *mp;
422
423 ip = new_inode(sb);
424 if (ip == NULL) {
425 jfs_err("diReadSpecial: new_inode returned NULL!");
426 return ip;
427 }
428
429 if (secondary) {
430 address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
431 JFS_IP(ip)->ipimap = sbi->ipaimap2;
432 } else {
433 address = AITBL_OFF >> L2PSIZE;
434 JFS_IP(ip)->ipimap = sbi->ipaimap;
435 }
436
437 ASSERT(inum < INOSPEREXT);
438
439 ip->i_ino = inum;
440
441 address += inum >> 3; /* 8 inodes per 4K page */
442
443 /* read the page of fixed disk inode (AIT) in raw mode */
444 mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
445 if (mp == NULL) {
446 set_nlink(ip, 1); /* Don't want iput() deleting it */
447 iput(ip);
448 return (NULL);
449 }
450
451 /* get the pointer to the disk inode of interest */
452 dp = (struct dinode *) (mp->data);
453 dp += inum % 8; /* 8 inodes per 4K page */
454
455 /* copy on-disk inode to in-memory inode */
456 if ((copy_from_dinode(dp, ip)) != 0) {
457 /* handle bad return by returning NULL for ip */
458 set_nlink(ip, 1); /* Don't want iput() deleting it */
459 iput(ip);
460 /* release the page */
461 release_metapage(mp);
462 return (NULL);
463
464 }
465
466 ip->i_mapping->a_ops = &jfs_metapage_aops;
467 mapping_set_gfp_mask(ip->i_mapping, GFP_NOFS);
468
469 /* Allocations to metadata inodes should not affect quotas */
470 ip->i_flags |= S_NOQUOTA;
471
472 if ((inum == FILESYSTEM_I) && (JFS_IP(ip)->ipimap == sbi->ipaimap)) {
473 sbi->gengen = le32_to_cpu(dp->di_gengen);
474 sbi->inostamp = le32_to_cpu(dp->di_inostamp);
475 }
476
477 /* release the page */
478 release_metapage(mp);
479
480 inode_fake_hash(ip);
481
482 return (ip);
483 }
484
485 /*
486 * NAME: diWriteSpecial()
487 *
488 * FUNCTION: Write the special inode to disk
489 *
490 * PARAMETERS:
491 * ip - special inode
492 * secondary - 1 if secondary aggregate inode table
493 *
494 * RETURN VALUES: none
495 */
496
497 void diWriteSpecial(struct inode *ip, int secondary)
498 {
499 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
500 uint address;
501 struct dinode *dp;
502 ino_t inum = ip->i_ino;
503 struct metapage *mp;
504
505 if (secondary)
506 address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
507 else
508 address = AITBL_OFF >> L2PSIZE;
509
510 ASSERT(inum < INOSPEREXT);
511
512 address += inum >> 3; /* 8 inodes per 4K page */
513
514 /* read the page of fixed disk inode (AIT) in raw mode */
515 mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
516 if (mp == NULL) {
517 jfs_err("diWriteSpecial: failed to read aggregate inode extent!");
518 return;
519 }
520
521 /* get the pointer to the disk inode of interest */
522 dp = (struct dinode *) (mp->data);
523 dp += inum % 8; /* 8 inodes per 4K page */
524
525 /* copy on-disk inode to in-memory inode */
526 copy_to_dinode(dp, ip);
527 memcpy(&dp->di_xtroot, &JFS_IP(ip)->i_xtroot, 288);
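/* (288 bytes is assumed to cover the in-inode xtree root area, i.e.
 * XTROOTMAXSLOT 16-byte xad slots; see jfs_xtree.h.)
 */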
528
529 if (inum == FILESYSTEM_I)
530 dp->di_gengen = cpu_to_le32(sbi->gengen);
531
532 /* write the page */
533 write_metapage(mp);
534 }
535
536 /*
537 * NAME: diFreeSpecial()
538 *
539 * FUNCTION: Free allocated space for special inode
540 */
541 void diFreeSpecial(struct inode *ip)
542 {
543 if (ip == NULL) {
544 jfs_err("diFreeSpecial called with NULL ip!");
545 return;
546 }
547 filemap_write_and_wait(ip->i_mapping);
548 truncate_inode_pages(ip->i_mapping, 0);
549 iput(ip);
550 }
551
552
553
554 /*
555 * NAME: diWrite()
556 *
557 * FUNCTION: write the on-disk inode portion of the in-memory inode
558 * to its corresponding on-disk inode.
559 *
560 * on entry, the specified incore inode should itself
561 * specify the disk inode number corresponding to the
562 * incore inode (i.e. i_number should be initialized).
563 *
564 * the inode contains the inode extent address for the disk
565 * inode. with the inode extent address in hand, the
566 * page of the extent that contains the disk inode is
567 * read and the disk inode portion of the incore inode
568 * is copied to the disk inode.
569 *
570 * PARAMETERS:
571 * tid - transaction id
572 * ip - pointer to incore inode to be written to the inode extent.
573 *
574 * RETURN VALUES:
575 * 0 - success
576 * -EIO - i/o error.
577 */
578 int diWrite(tid_t tid, struct inode *ip)
579 {
580 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
581 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
582 int rc = 0;
583 s32 ino;
584 struct dinode *dp;
585 s64 blkno;
586 int block_offset;
587 int inodes_left;
588 struct metapage *mp;
589 unsigned long pageno;
590 int rel_inode;
591 int dioffset;
592 struct inode *ipimap;
593 uint type;
594 lid_t lid;
595 struct tlock *ditlck, *tlck;
596 struct linelock *dilinelock, *ilinelock;
597 struct lv *lv;
598 int n;
599
600 ipimap = jfs_ip->ipimap;
601
602 ino = ip->i_ino & (INOSPERIAG - 1);
603
604 if (!addressPXD(&(jfs_ip->ixpxd)) ||
605 (lengthPXD(&(jfs_ip->ixpxd)) !=
606 JFS_IP(ipimap)->i_imap->im_nbperiext)) {
607 jfs_error(ip->i_sb, "ixpxd invalid\n");
608 return -EIO;
609 }
610
611 /*
612 * read the page of disk inode containing the specified inode:
613 */
614 /* compute the block address of the page */
615 blkno = INOPBLK(&(jfs_ip->ixpxd), ino, sbi->l2nbperpage);
616
617 rel_inode = (ino & (INOSPERPAGE - 1));
618 pageno = blkno >> sbi->l2nbperpage;
619
620 if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
621 /*
622 * OS/2 didn't always align inode extents on page boundaries
623 */
624 inodes_left =
625 (sbi->nbperpage - block_offset) << sbi->l2niperblk;
626
627 if (rel_inode < inodes_left)
628 rel_inode += block_offset << sbi->l2niperblk;
629 else {
630 pageno += 1;
631 rel_inode -= inodes_left;
632 }
633 }
634 /* read the page of disk inode */
635 retry:
636 mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
637 if (!mp)
638 return -EIO;
639
640 /* get the pointer to the disk inode */
641 dp = (struct dinode *) mp->data;
642 dp += rel_inode;
643
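/* dioffset below is the byte offset used as the base for the 128-byte
 * linelock slot offsets recorded further down.
 */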
644 dioffset = (ino & (INOSPERPAGE - 1)) << L2DISIZE;
645
646 /*
647 * acquire transaction lock on the on-disk inode;
648 * N.B. tlock is acquired on ipimap not ip;
649 */
650 if ((ditlck =
651 txLock(tid, ipimap, mp, tlckINODE | tlckENTRY)) == NULL)
652 goto retry;
653 dilinelock = (struct linelock *) & ditlck->lock;
654
655 /*
656 * copy btree root from in-memory inode to on-disk inode
657 *
658 * (tlock is taken from inline B+-tree root in in-memory
659 * inode when the B+-tree root is updated, which is pointed
660 * by jfs_ip->blid as well as being on tx tlock list)
661 *
662 * further processing of btree root is based on the copy
663 * in in-memory inode, where txLog() will log from, and,
664 * for xtree root, txUpdateMap() will update map and reset
665 * XAD_NEW bit;
666 */
667
668 if (S_ISDIR(ip->i_mode) && (lid = jfs_ip->xtlid)) {
669 /*
670 * This is the special xtree inside the directory for storing
671 * the directory table
672 */
673 xtpage_t *p, *xp;
674 xad_t *xad;
675
676 jfs_ip->xtlid = 0;
677 tlck = lid_to_tlock(lid);
678 assert(tlck->type & tlckXTREE);
679 tlck->type |= tlckBTROOT;
680 tlck->mp = mp;
681 ilinelock = (struct linelock *) & tlck->lock;
682
683 /*
684 * copy xtree root from inode to dinode:
685 */
686 p = &jfs_ip->i_xtroot;
687 xp = (xtpage_t *) &dp->di_dirtable;
688 lv = ilinelock->lv;
689 for (n = 0; n < ilinelock->index; n++, lv++) {
690 memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
691 lv->length << L2XTSLOTSIZE);
692 }
693
694 /* reset on-disk (metadata page) xtree XAD_NEW bit */
695 xad = &xp->xad[XTENTRYSTART];
696 for (n = XTENTRYSTART;
697 n < le16_to_cpu(xp->header.nextindex); n++, xad++)
698 if (xad->flag & (XAD_NEW | XAD_EXTENDED))
699 xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
700 }
701
702 if ((lid = jfs_ip->blid) == 0)
703 goto inlineData;
704 jfs_ip->blid = 0;
705
706 tlck = lid_to_tlock(lid);
707 type = tlck->type;
708 tlck->type |= tlckBTROOT;
709 tlck->mp = mp;
710 ilinelock = (struct linelock *) & tlck->lock;
711
712 /*
713 * regular file: 16 byte (XAD slot) granularity
714 */
715 if (type & tlckXTREE) {
716 xtpage_t *p, *xp;
717 xad_t *xad;
718
719 /*
720 * copy xtree root from inode to dinode:
721 */
722 p = &jfs_ip->i_xtroot;
723 xp = &dp->di_xtroot;
724 lv = ilinelock->lv;
725 for (n = 0; n < ilinelock->index; n++, lv++) {
726 memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
727 lv->length << L2XTSLOTSIZE);
728 }
729
730 /* reset on-disk (metadata page) xtree XAD_NEW bit */
731 xad = &xp->xad[XTENTRYSTART];
732 for (n = XTENTRYSTART;
733 n < le16_to_cpu(xp->header.nextindex); n++, xad++)
734 if (xad->flag & (XAD_NEW | XAD_EXTENDED))
735 xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
736 }
737 /*
738 * directory: 32 byte (directory entry slot) granularity
739 */
740 else if (type & tlckDTREE) {
741 dtpage_t *p, *xp;
742
743 /*
744 * copy dtree root from inode to dinode:
745 */
746 p = (dtpage_t *) &jfs_ip->i_dtroot;
747 xp = (dtpage_t *) & dp->di_dtroot;
748 lv = ilinelock->lv;
749 for (n = 0; n < ilinelock->index; n++, lv++) {
750 memcpy(&xp->slot[lv->offset], &p->slot[lv->offset],
751 lv->length << L2DTSLOTSIZE);
752 }
753 } else {
754 jfs_err("diWrite: UFO tlock");
755 }
756
757 inlineData:
758 /*
759 * copy inline symlink from in-memory inode to on-disk inode
760 */
761 if (S_ISLNK(ip->i_mode) && ip->i_size < IDATASIZE) {
762 lv = & dilinelock->lv[dilinelock->index];
763 lv->offset = (dioffset + 2 * 128) >> L2INODESLOTSIZE;
764 lv->length = 2;
765 memcpy(&dp->di_inline_all, jfs_ip->i_inline_all, IDATASIZE);
766 dilinelock->index++;
767 }
768 /*
769 * copy inline data from in-memory inode to on-disk inode:
770 * 128 byte slot granularity
771 */
772 if (test_cflag(COMMIT_Inlineea, ip)) {
773 lv = & dilinelock->lv[dilinelock->index];
774 lv->offset = (dioffset + 3 * 128) >> L2INODESLOTSIZE;
775 lv->length = 1;
776 memcpy(&dp->di_inlineea, jfs_ip->i_inline_ea, INODESLOTSIZE);
777 dilinelock->index++;
778
779 clear_cflag(COMMIT_Inlineea, ip);
780 }
781
782 /*
783 * lock/copy inode base: 128 byte slot granularity
784 */
785 lv = & dilinelock->lv[dilinelock->index];
786 lv->offset = dioffset >> L2INODESLOTSIZE;
787 copy_to_dinode(dp, ip);
788 if (test_and_clear_cflag(COMMIT_Dirtable, ip)) {
789 lv->length = 2;
790 memcpy(&dp->di_dirtable, &jfs_ip->i_dirtable, 96);
791 } else
792 lv->length = 1;
793 dilinelock->index++;
794
795 /* release the buffer holding the updated on-disk inode.
796 * the buffer will be later written by commit processing.
797 */
798 write_metapage(mp);
799
800 return (rc);
801 }
802
803
804 /*
805 * NAME: diFree(ip)
806 *
807 * FUNCTION: free a specified inode from the inode working map
808 * for a fileset or aggregate.
809 *
810 * if the inode to be freed represents the first (only)
811 * free inode within the iag, the iag will be placed on
812 * the ag free inode list.
813 *
814 * freeing the inode will cause the inode extent to be
815 * freed if the inode is the only allocated inode within
816 * the extent. in this case all the disk resource backing
817 * up the inode extent will be freed. in addition, the iag
818 * will be placed on the ag extent free list if the extent
819 * is the first free extent in the iag. if freeing the
820 * extent also means that no free inodes will exist for
821 * the iag, the iag will also be removed from the ag free
822 * inode list.
823 *
824 * the iag describing the inode will be freed if the extent
825 * is to be freed and it is the only backed extent within
826 * the iag. in this case, the iag will be removed from the
827 * ag free extent list and ag free inode list and placed on
828 * the inode map's free iag list.
829 *
830 * a careful update approach is used to provide consistency
831 * in the face of updates to multiple buffers. under this
832 * approach, all required buffers are obtained before making
833 * any updates and are held until all updates are complete.
834 *
835 * PARAMETERS:
836 * ip - inode to be freed.
837 *
838 * RETURN VALUES:
839 * 0 - success
840 * -EIO - i/o error.
841 */
842 int diFree(struct inode *ip)
843 {
844 int rc;
845 ino_t inum = ip->i_ino;
846 struct iag *iagp, *aiagp, *biagp, *ciagp, *diagp;
847 struct metapage *mp, *amp, *bmp, *cmp, *dmp;
848 int iagno, ino, extno, bitno, sword, agno;
849 int back, fwd;
850 u32 bitmap, mask;
851 struct inode *ipimap = JFS_SBI(ip->i_sb)->ipimap;
852 struct inomap *imap = JFS_IP(ipimap)->i_imap;
853 pxd_t freepxd;
854 tid_t tid;
855 struct inode *iplist[3];
856 struct tlock *tlck;
857 struct pxd_lock *pxdlock;
858
859 /*
860 * This is just to suppress compiler warnings. The same logic that
861 * references these variables is used to initialize them.
862 */
863 aiagp = biagp = ciagp = diagp = NULL;
864
865 /* get the iag number containing the inode.
866 */
867 iagno = INOTOIAG(inum);
868
869 /* make sure that the iag is contained within
870 * the map.
871 */
872 if (iagno >= imap->im_nextiag) {
873 print_hex_dump(KERN_ERR, "imap: ", DUMP_PREFIX_ADDRESS, 16, 4,
874 imap, 32, 0);
875 jfs_error(ip->i_sb, "inum = %d, iagno = %d, nextiag = %d\n",
876 (uint) inum, iagno, imap->im_nextiag);
877 return -EIO;
878 }
879
880 /* get the allocation group for this ino.
881 */
882 agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb));
883
884 /* Lock the AG specific inode map information
885 */
886 AG_LOCK(imap, agno);
887
888 /* Obtain read lock in imap inode. Don't release it until we have
889 * read all of the IAG's that we are going to.
890 */
891 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
892
893 /* read the iag.
894 */
895 if ((rc = diIAGRead(imap, iagno, &mp))) {
896 IREAD_UNLOCK(ipimap);
897 AG_UNLOCK(imap, agno);
898 return (rc);
899 }
900 iagp = (struct iag *) mp->data;
901
902 /* get the inode number and extent number of the inode within
903 * the iag and the inode number within the extent.
904 */
905 ino = inum & (INOSPERIAG - 1);
906 extno = ino >> L2INOSPEREXT;
907 bitno = ino & (INOSPEREXT - 1);
908 mask = HIGHORDER >> bitno;
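/* a set wmap bit (HIGHORDER is the most-significant-bit mask) means
 * the inode is currently allocated in the working map.
 */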
909
910 if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
911 jfs_error(ip->i_sb, "wmap shows inode already free\n");
912 }
913
914 if (!addressPXD(&iagp->inoext[extno])) {
915 release_metapage(mp);
916 IREAD_UNLOCK(ipimap);
917 AG_UNLOCK(imap, agno);
918 jfs_error(ip->i_sb, "invalid inoext\n");
919 return -EIO;
920 }
921
922 /* compute the bitmap for the extent reflecting the freed inode.
923 */
924 bitmap = le32_to_cpu(iagp->wmap[extno]) & ~mask;
925
926 if (imap->im_agctl[agno].numfree > imap->im_agctl[agno].numinos) {
927 release_metapage(mp);
928 IREAD_UNLOCK(ipimap);
929 AG_UNLOCK(imap, agno);
930 jfs_error(ip->i_sb, "numfree > numinos\n");
931 return -EIO;
932 }
933 /*
934 * inode extent still has some inodes or below low water mark:
935 * keep the inode extent;
936 */
937 if (bitmap ||
938 imap->im_agctl[agno].numfree < 96 ||
939 (imap->im_agctl[agno].numfree < 288 &&
940 (((imap->im_agctl[agno].numfree * 100) /
941 imap->im_agctl[agno].numinos) <= 25))) {
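/* (heuristic: keep the extent while other inodes in it are still in
 * use, or while the AG's free-inode count is below the low water
 * marks checked above.)
 */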
942 /* if the iag currently has no free inodes (i.e.,
943 * the inode being freed is the first free inode of iag),
944 * insert the iag at head of the inode free list for the ag.
945 */
946 if (iagp->nfreeinos == 0) {
947 /* check if there are any iags on the ag inode
948 * free list. if so, read the first one so that
949 * we can link the current iag onto the list at
950 * the head.
951 */
952 if ((fwd = imap->im_agctl[agno].inofree) >= 0) {
953 /* read the iag that currently is the head
954 * of the list.
955 */
956 if ((rc = diIAGRead(imap, fwd, &amp))) {
957 IREAD_UNLOCK(ipimap);
958 AG_UNLOCK(imap, agno);
959 release_metapage(mp);
960 return (rc);
961 }
962 aiagp = (struct iag *) amp->data;
963
964 /* make current head point back to the iag.
965 */
966 aiagp->inofreeback = cpu_to_le32(iagno);
967
968 write_metapage(amp);
969 }
970
971 /* iag points forward to current head and iag
972 * becomes the new head of the list.
973 */
974 iagp->inofreefwd =
975 cpu_to_le32(imap->im_agctl[agno].inofree);
976 iagp->inofreeback = cpu_to_le32(-1);
977 imap->im_agctl[agno].inofree = iagno;
978 }
979 IREAD_UNLOCK(ipimap);
980
981 /* update the free inode summary map for the extent if
982 * freeing the inode means the extent will now have free
983 * inodes (i.e., the inode being freed is the first free
984 * inode of extent),
985 */
986 if (iagp->wmap[extno] == cpu_to_le32(ONES)) {
987 sword = extno >> L2EXTSPERSUM;
988 bitno = extno & (EXTSPERSUM - 1);
989 iagp->inosmap[sword] &=
990 cpu_to_le32(~(HIGHORDER >> bitno));
991 }
992
993 /* update the bitmap.
994 */
995 iagp->wmap[extno] = cpu_to_le32(bitmap);
996
997 /* update the free inode counts at the iag, ag and
998 * map level.
999 */
1000 le32_add_cpu(&iagp->nfreeinos, 1);
1001 imap->im_agctl[agno].numfree += 1;
1002 atomic_inc(&imap->im_numfree);
1003
1004 /* release the AG inode map lock
1005 */
1006 AG_UNLOCK(imap, agno);
1007
1008 /* write the iag */
1009 write_metapage(mp);
1010
1011 return (0);
1012 }
1013
1014
1015 /*
1016 * inode extent has become free and above low water mark:
1017 * free the inode extent;
1018 */
1019
1020 /*
1021 * prepare to update iag list(s) (careful update step 1)
1022 */
1023 amp = bmp = cmp = dmp = NULL;
1024 fwd = back = -1;
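/* careful update: amp/bmp will hold the neighbours on the ag extent
 * free list and cmp/dmp the neighbours on the ag inode free list,
 * all read before any list is modified.
 */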
1025
1026 /* check if the iag currently has no free extents. if so,
1027 * it will be placed on the head of the ag extent free list.
1028 */
1029 if (iagp->nfreeexts == 0) {
1030 /* check if the ag extent free list has any iags.
1031 * if so, read the iag at the head of the list now.
1032 * this (head) iag will be updated later to reflect
1033 * the addition of the current iag at the head of
1034 * the list.
1035 */
1036 if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
1037 if ((rc = diIAGRead(imap, fwd, &amp)))
1038 goto error_out;
1039 aiagp = (struct iag *) amp->data;
1040 }
1041 } else {
1042 /* iag has free extents. check if the addition of a free
1043 * extent will cause all extents to be free within this
1044 * iag. if so, the iag will be removed from the ag extent
1045 * free list and placed on the inode map's free iag list.
1046 */
1047 if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
1048 /* in preparation for removing the iag from the
1049 * ag extent free list, read the iags preceding
1050 * and following the iag on the ag extent free
1051 * list.
1052 */
1053 if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
1054 if ((rc = diIAGRead(imap, fwd, &amp)))
1055 goto error_out;
1056 aiagp = (struct iag *) amp->data;
1057 }
1058
1059 if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
1060 if ((rc = diIAGRead(imap, back, &bmp)))
1061 goto error_out;
1062 biagp = (struct iag *) bmp->data;
1063 }
1064 }
1065 }
1066
1067 /* remove the iag from the ag inode free list if freeing
1068 * this extent causes the iag to have no free inodes.
1069 */
1070 if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
1071 int inofreeback = le32_to_cpu(iagp->inofreeback);
1072 int inofreefwd = le32_to_cpu(iagp->inofreefwd);
1073
1074 /* in preparation for removing the iag from the
1075 * ag inode free list, read the iags preceding
1076 * and following the iag on the ag inode free
1077 * list. before reading these iags, we must make
1078 * sure that we don't already have them in hand
1079 * from up above, since re-reading an iag (buffer)
1080 * we are currently holding would cause a deadlock.
1081 */
1082 if (inofreefwd >= 0) {
1083
1084 if (inofreefwd == fwd)
1085 ciagp = (struct iag *) amp->data;
1086 else if (inofreefwd == back)
1087 ciagp = (struct iag *) bmp->data;
1088 else {
1089 if ((rc =
1090 diIAGRead(imap, inofreefwd, &cmp)))
1091 goto error_out;
1092 ciagp = (struct iag *) cmp->data;
1093 }
1094 assert(ciagp != NULL);
1095 }
1096
1097 if (inofreeback >= 0) {
1098 if (inofreeback == fwd)
1099 diagp = (struct iag *) amp->data;
1100 else if (inofreeback == back)
1101 diagp = (struct iag *) bmp->data;
1102 else {
1103 if ((rc =
1104 diIAGRead(imap, inofreeback, &dmp)))
1105 goto error_out;
1106 diagp = (struct iag *) dmp->data;
1107 }
1108 assert(diagp != NULL);
1109 }
1110 }
1111
1112 IREAD_UNLOCK(ipimap);
1113
1114 /*
1115 * invalidate any page of the inode extent freed from buffer cache;
1116 */
1117 freepxd = iagp->inoext[extno];
1118 invalidate_pxd_metapages(ip, freepxd);
1119
1120 /*
1121 * update iag list(s) (careful update step 2)
1122 */
1123 /* add the iag to the ag extent free list if this is the
1124 * first free extent for the iag.
1125 */
1126 if (iagp->nfreeexts == 0) {
1127 if (fwd >= 0)
1128 aiagp->extfreeback = cpu_to_le32(iagno);
1129
1130 iagp->extfreefwd =
1131 cpu_to_le32(imap->im_agctl[agno].extfree);
1132 iagp->extfreeback = cpu_to_le32(-1);
1133 imap->im_agctl[agno].extfree = iagno;
1134 } else {
1135 /* remove the iag from the ag extent list if all extents
1136 * are now free and place it on the inode map iag free list.
1137 */
1138 if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
1139 if (fwd >= 0)
1140 aiagp->extfreeback = iagp->extfreeback;
1141
1142 if (back >= 0)
1143 biagp->extfreefwd = iagp->extfreefwd;
1144 else
1145 imap->im_agctl[agno].extfree =
1146 le32_to_cpu(iagp->extfreefwd);
1147
1148 iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
1149
1150 IAGFREE_LOCK(imap);
1151 iagp->iagfree = cpu_to_le32(imap->im_freeiag);
1152 imap->im_freeiag = iagno;
1153 IAGFREE_UNLOCK(imap);
1154 }
1155 }
1156
1157 /* remove the iag from the ag inode free list if freeing
1158 * this extent causes the iag to have no free inodes.
1159 */
1160 if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
1161 if ((int) le32_to_cpu(iagp->inofreefwd) >= 0)
1162 ciagp->inofreeback = iagp->inofreeback;
1163
1164 if ((int) le32_to_cpu(iagp->inofreeback) >= 0)
1165 diagp->inofreefwd = iagp->inofreefwd;
1166 else
1167 imap->im_agctl[agno].inofree =
1168 le32_to_cpu(iagp->inofreefwd);
1169
1170 iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
1171 }
1172
1173 /* update the inode extent address and working map
1174 * to reflect the free extent.
1175 * the permanent map should have been updated already
1176 * for the inode being freed.
1177 */
1178 if (iagp->pmap[extno] != 0) {
1179 jfs_error(ip->i_sb, "the pmap does not show inode free\n");
1180 }
1181 iagp->wmap[extno] = 0;
1182 PXDlength(&iagp->inoext[extno], 0);
1183 PXDaddress(&iagp->inoext[extno], 0);
1184
1185 /* update the free extent and free inode summary maps
1186 * to reflect the freed extent.
1187 * the inode summary map is marked to indicate no inodes
1188 * available for the freed extent.
1189 */
1190 sword = extno >> L2EXTSPERSUM;
1191 bitno = extno & (EXTSPERSUM - 1);
1192 mask = HIGHORDER >> bitno;
1193 iagp->inosmap[sword] |= cpu_to_le32(mask);
1194 iagp->extsmap[sword] &= cpu_to_le32(~mask);
1195
1196 /* update the number of free inodes and number of free extents
1197 * for the iag.
1198 */
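/* (the inode being freed was still marked allocated, so the extent
 * accounted for INOSPEREXT - 1 free inodes before being released.)
 */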
1199 le32_add_cpu(&iagp->nfreeinos, -(INOSPEREXT - 1));
1200 le32_add_cpu(&iagp->nfreeexts, 1);
1201
1202 /* update the number of free inodes and backed inodes
1203 * at the ag and inode map level.
1204 */
1205 imap->im_agctl[agno].numfree -= (INOSPEREXT - 1);
1206 imap->im_agctl[agno].numinos -= INOSPEREXT;
1207 atomic_sub(INOSPEREXT - 1, &imap->im_numfree);
1208 atomic_sub(INOSPEREXT, &imap->im_numinos);
1209
1210 if (amp)
1211 write_metapage(amp);
1212 if (bmp)
1213 write_metapage(bmp);
1214 if (cmp)
1215 write_metapage(cmp);
1216 if (dmp)
1217 write_metapage(dmp);
1218
1219 /*
1220 * start transaction to update block allocation map
1221 * for the inode extent freed;
1222 *
1223 * N.B. AG_LOCK is released and iag will be released below, and
1224 * other thread may allocate inode from/reusing the ixad freed
1225 * BUT with new/different backing inode extent from the extent
1226 * to be freed by the transaction;
1227 */
1228 tid = txBegin(ipimap->i_sb, COMMIT_FORCE);
1229 mutex_lock(&JFS_IP(ipimap)->commit_mutex);
1230
1231 /* acquire tlock of the iag page of the freed ixad
1232 * to force the page NOHOMEOK (even though no data is
1233 * logged from the iag page) until NOREDOPAGE|FREEXTENT log
1234 * for the free of the extent is committed;
1235 * write FREEXTENT|NOREDOPAGE log record
1236 * N.B. linelock is overlaid as freed extent descriptor;
1237 */
1238 tlck = txLock(tid, ipimap, mp, tlckINODE | tlckFREE);
1239 pxdlock = (struct pxd_lock *) & tlck->lock;
1240 pxdlock->flag = mlckFREEPXD;
1241 pxdlock->pxd = freepxd;
1242 pxdlock->index = 1;
1243
1244 write_metapage(mp);
1245
1246 iplist[0] = ipimap;
1247
1248 /*
1249 * logredo needs the IAG number and IAG extent index in order
1250 * to ensure that the IMap is consistent. The least disruptive
1251 * way to pass these values through to the transaction manager
1252 * is in the iplist array.
1253 *
1254 * It's not pretty, but it works.
1255 */
1256 iplist[1] = (struct inode *) (size_t)iagno;
1257 iplist[2] = (struct inode *) (size_t)extno;
1258
1259 rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
1260
1261 txEnd(tid);
1262 mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
1263
1264 /* unlock the AG inode map information */
1265 AG_UNLOCK(imap, agno);
1266
1267 return (0);
1268
1269 error_out:
1270 IREAD_UNLOCK(ipimap);
1271
1272 if (amp)
1273 release_metapage(amp);
1274 if (bmp)
1275 release_metapage(bmp);
1276 if (cmp)
1277 release_metapage(cmp);
1278 if (dmp)
1279 release_metapage(dmp);
1280
1281 AG_UNLOCK(imap, agno);
1282
1283 release_metapage(mp);
1284
1285 return (rc);
1286 }
1287
1288 /*
1289 * There are several places in the diAlloc* routines where we initialize
1290 * the inode.
1291 */
1292 static inline void
1293 diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
1294 {
1295 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
1296
1297 ip->i_ino = (iagno << L2INOSPERIAG) + ino;
1298 jfs_ip->ixpxd = iagp->inoext[extno];
1299 jfs_ip->agstart = le64_to_cpu(iagp->agstart);
1300 jfs_ip->active_ag = -1;
1301 }
1302
1303
1304 /*
1305 * NAME: diAlloc(pip,dir,ip)
1306 *
1307 * FUNCTION: allocate a disk inode from the inode working map
1308 * for a fileset or aggregate.
1309 *
1310 * PARAMETERS:
1311 * pip - pointer to incore inode for the parent inode.
1312 * dir - 'true' if the new disk inode is for a directory.
1313 * ip - pointer to a new inode
1314 *
1315 * RETURN VALUES:
1316 * 0 - success.
1317 * -ENOSPC - insufficient disk resources.
1318 * -EIO - i/o error.
1319 */
1320 int diAlloc(struct inode *pip, bool dir, struct inode *ip)
1321 {
1322 int rc, ino, iagno, addext, extno, bitno, sword;
1323 int nwords, rem, i, agno, dn_numag;
1324 u32 mask, inosmap, extsmap;
1325 struct inode *ipimap;
1326 struct metapage *mp;
1327 ino_t inum;
1328 struct iag *iagp;
1329 struct inomap *imap;
1330
1331 /* get the pointers to the inode map inode and the
1332 * corresponding imap control structure.
1333 */
1334 ipimap = JFS_SBI(pip->i_sb)->ipimap;
1335 imap = JFS_IP(ipimap)->i_imap;
1336 JFS_IP(ip)->ipimap = ipimap;
1337 JFS_IP(ip)->fileset = FILESYSTEM_I;
1338
1339 /* for a directory, the allocation policy is to start
1340 * at the ag level using the preferred ag.
1341 */
1342 if (dir) {
1343 agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
1344 AG_LOCK(imap, agno);
1345 goto tryag;
1346 }
1347
1348 /* for files, the policy starts off by trying to allocate from
1349 * the same iag containing the parent disk inode:
1350 * try to allocate the new disk inode close to the parent disk
1351 * inode, using parent disk inode number + 1 as the allocation
1352 * hint. (we use a left-to-right policy to attempt to avoid
1353 * moving backward on the disk.) compute the hint within the
1354 * file system and the iag.
1355 */
1356
1357 /* get the ag number of this iag */
1358 agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
1359 dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
1360 if (agno < 0 || agno > dn_numag)
1361 return -EIO;
1362
1363 if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
1364 /*
1365 * There is an open file actively growing. We want to
1366 * allocate new inodes from a different ag to avoid
1367 * fragmentation problems.
1368 */
1369 agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
1370 AG_LOCK(imap, agno);
1371 goto tryag;
1372 }
1373
1374 inum = pip->i_ino + 1;
1375 ino = inum & (INOSPERIAG - 1);
1376
1377 /* back off the hint if it is outside of the iag */
1378 if (ino == 0)
1379 inum = pip->i_ino;
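/* (i.e. if pip is the last inode of its iag, don't let the hint spill
 * into the next iag; fall back to pip's own inode number.)
 */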
1380
1381 /* lock the AG inode map information */
1382 AG_LOCK(imap, agno);
1383
1384 /* Get read lock on imap inode */
1385 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
1386
1387 /* get the iag number and read the iag */
1388 iagno = INOTOIAG(inum);
1389 if ((rc = diIAGRead(imap, iagno, &mp))) {
1390 IREAD_UNLOCK(ipimap);
1391 AG_UNLOCK(imap, agno);
1392 return (rc);
1393 }
1394 iagp = (struct iag *) mp->data;
1395
1396 /* determine if new inode extent is allowed to be added to the iag.
1397 * new inode extent can be added to the iag if the ag
1398 * has less than 32 free disk inodes and the iag has free extents.
1399 */
1400 addext = (imap->im_agctl[agno].numfree < 32 && iagp->nfreeexts);
1401
1402 /*
1403 * try to allocate from the IAG
1404 */
1405 /* check if the inode may be allocated from the iag
1406 * (i.e. the inode has free inodes or new extent can be added).
1407 */
1408 if (iagp->nfreeinos || addext) {
1409 /* determine the extent number of the hint.
1410 */
1411 extno = ino >> L2INOSPEREXT;
1412
1413 /* check if the extent containing the hint has backed
1414 * inodes. if so, try to allocate within this extent.
1415 */
1416 if (addressPXD(&iagp->inoext[extno])) {
1417 bitno = ino & (INOSPEREXT - 1);
1418 if ((bitno =
1419 diFindFree(le32_to_cpu(iagp->wmap[extno]),
1420 bitno))
1421 < INOSPEREXT) {
1422 ino = (extno << L2INOSPEREXT) + bitno;
1423
1424 /* a free inode (bit) was found within this
1425 * extent, so allocate it.
1426 */
1427 rc = diAllocBit(imap, iagp, ino);
1428 IREAD_UNLOCK(ipimap);
1429 if (rc) {
1430 assert(rc == -EIO);
1431 } else {
1432 /* set the results of the allocation
1433 * and write the iag.
1434 */
1435 diInitInode(ip, iagno, ino, extno,
1436 iagp);
1437 mark_metapage_dirty(mp);
1438 }
1439 release_metapage(mp);
1440
1441 /* free the AG lock and return.
1442 */
1443 AG_UNLOCK(imap, agno);
1444 return (rc);
1445 }
1446
1447 if (!addext)
1448 extno =
1449 (extno ==
1450 EXTSPERIAG - 1) ? 0 : extno + 1;
1451 }
1452
1453 /*
1454 * no free inodes within the extent containing the hint.
1455 *
1456 * try to allocate from the backed extents following
1457 * hint or, if appropriate (i.e. addext is true), allocate
1458 * an extent of free inodes at or following the extent
1459 * containing the hint.
1460 *
1461 * the free inode and free extent summary maps are used
1462 * here, so determine the starting summary map position
1463 * and the number of words we'll have to examine. again,
1464 * the approach is to allocate following the hint, so we
1465 * might have to initially ignore prior bits of the summary
1466 * map that represent extents prior to the extent containing
1467 * the hint and later revisit these bits.
1468 */
1469 bitno = extno & (EXTSPERSUM - 1);
1470 nwords = (bitno == 0) ? SMAPSZ : SMAPSZ + 1;
1471 sword = extno >> L2EXTSPERSUM;
1472
1473 /* mask any prior bits for the starting words of the
1474 * summary map.
1475 */
1476 mask = (bitno == 0) ? 0 : (ONES << (EXTSPERSUM - bitno));
1477 inosmap = le32_to_cpu(iagp->inosmap[sword]) | mask;
1478 extsmap = le32_to_cpu(iagp->extsmap[sword]) | mask;
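/* (extents preceding the hint are masked off as unavailable in this
 * first summary word; the wrap-around pass at the end of the scan
 * revisits them.)
 */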
1479
1480 /* scan the free inode and free extent summary maps for
1481 * free resources.
1482 */
1483 for (i = 0; i < nwords; i++) {
1484 /* check if this word of the free inode summary
1485 * map describes an extent with free inodes.
1486 */
1487 if (~inosmap) {
1488 /* an extent with free inodes has been
1489 * found. determine the extent number
1490 * and the inode number within the extent.
1491 */
1492 rem = diFindFree(inosmap, 0);
1493 extno = (sword << L2EXTSPERSUM) + rem;
1494 rem = diFindFree(le32_to_cpu(iagp->wmap[extno]),
1495 0);
1496 if (rem >= INOSPEREXT) {
1497 IREAD_UNLOCK(ipimap);
1498 release_metapage(mp);
1499 AG_UNLOCK(imap, agno);
1500 jfs_error(ip->i_sb,
1501 "can't find free bit in wmap\n");
1502 return -EIO;
1503 }
1504
1505 /* determine the inode number within the
1506 * iag and allocate the inode from the
1507 * map.
1508 */
1509 ino = (extno << L2INOSPEREXT) + rem;
1510 rc = diAllocBit(imap, iagp, ino);
1511 IREAD_UNLOCK(ipimap);
1512 if (rc)
1513 assert(rc == -EIO);
1514 else {
1515 /* set the results of the allocation
1516 * and write the iag.
1517 */
1518 diInitInode(ip, iagno, ino, extno,
1519 iagp);
1520 mark_metapage_dirty(mp);
1521 }
1522 release_metapage(mp);
1523
1524 /* free the AG lock and return.
1525 */
1526 AG_UNLOCK(imap, agno);
1527 return (rc);
1528
1529 }
1530
1531 /* check if we may allocate an extent of free
1532 * inodes and whether this word of the free
1533 * extents summary map describes a free extent.
1534 */
1535 if (addext && ~extsmap) {
1536 /* a free extent has been found. determine
1537 * the extent number.
1538 */
1539 rem = diFindFree(extsmap, 0);
1540 extno = (sword << L2EXTSPERSUM) + rem;
1541
1542 /* allocate an extent of free inodes.
1543 */
1544 if ((rc = diNewExt(imap, iagp, extno))) {
1545 /* if there is no disk space for a
1546 * new extent, try to allocate the
1547 * disk inode from somewhere else.
1548 */
1549 if (rc == -ENOSPC)
1550 break;
1551
1552 assert(rc == -EIO);
1553 } else {
1554 /* set the results of the allocation
1555 * and write the iag.
1556 */
1557 diInitInode(ip, iagno,
1558 extno << L2INOSPEREXT,
1559 extno, iagp);
1560 mark_metapage_dirty(mp);
1561 }
1562 release_metapage(mp);
1563 /* free the imap inode & the AG lock & return.
1564 */
1565 IREAD_UNLOCK(ipimap);
1566 AG_UNLOCK(imap, agno);
1567 return (rc);
1568 }
1569
1570 /* move on to the next set of summary map words.
1571 */
1572 sword = (sword == SMAPSZ - 1) ? 0 : sword + 1;
1573 inosmap = le32_to_cpu(iagp->inosmap[sword]);
1574 extsmap = le32_to_cpu(iagp->extsmap[sword]);
1575 }
1576 }
1577 /* unlock imap inode */
1578 IREAD_UNLOCK(ipimap);
1579
1580 /* nothing doing in this iag, so release it. */
1581 release_metapage(mp);
1582
1583 tryag:
1584 /*
1585 * try to allocate anywhere within the same AG as the parent inode.
1586 */
1587 rc = diAllocAG(imap, agno, dir, ip);
1588
1589 AG_UNLOCK(imap, agno);
1590
1591 if (rc != -ENOSPC)
1592 return (rc);
1593
1594 /*
1595 * try to allocate in any AG.
1596 */
1597 return (diAllocAny(imap, agno, dir, ip));
1598 }
1599
1600
1601 /*
1602 * NAME: diAllocAG(imap,agno,dir,ip)
1603 *
1604 * FUNCTION: allocate a disk inode from the allocation group.
1605 *
1606 * this routine first determines if a new extent of free
1607 * inodes should be added for the allocation group, with
1608 * the current request satisfied from this extent. if this
1609 * is the case, an attempt will be made to do just that. if
1610 * this attempt fails or it has been determined that a new
1611 * extent should not be added, an attempt is made to satisfy
1612 * the request by allocating an existing (backed) free inode
1613 * from the allocation group.
1614 *
1615 * PRE CONDITION: Already have the AG lock for this AG.
1616 *
1617 * PARAMETERS:
1618 * imap - pointer to inode map control structure.
1619 * agno - allocation group to allocate from.
1620 * dir - 'true' if the new disk inode is for a directory.
1621 * ip - pointer to the new inode to be filled in on successful return
1622 * with the disk inode number allocated, its extent address
1623 * and the start of the ag.
1624 *
1625 * RETURN VALUES:
1626 * 0 - success.
1627 * -ENOSPC - insufficient disk resources.
1628 * -EIO - i/o error.
1629 */
1630 static int
1631 diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
1632 {
1633 int rc, addext, numfree, numinos;
1634
1635 /* get the number of free and the number of backed disk
1636 * inodes currently within the ag.
1637 */
1638 numfree = imap->im_agctl[agno].numfree;
1639 numinos = imap->im_agctl[agno].numinos;
1640
1641 if (numfree > numinos) {
1642 jfs_error(ip->i_sb, "numfree > numinos\n");
1643 return -EIO;
1644 }
1645
1646 /* determine if we should allocate a new extent of free inodes
1647 * within the ag: for directory inodes, add a new extent
1648 * if there are a small number of free inodes or number of free
1649 * inodes is a small percentage of the number of backed inodes.
1650 */
1651 if (dir)
1652 addext = (numfree < 64 ||
1653 (numfree < 256
1654 && ((numfree * 100) / numinos) <= 20));
1655 else
1656 addext = (numfree == 0);
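/* for regular files, a new extent is added only when the AG has no
 * free backed inodes left at all.
 */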
1657
1658 /*
1659 * try to allocate a new extent of free inodes.
1660 */
1661 if (addext) {
1662 /* if free space is not available for this new extent, try
1663 * below to allocate a free and existing (already backed)
1664 * inode from the ag.
1665 */
1666 if ((rc = diAllocExt(imap, agno, ip)) != -ENOSPC)
1667 return (rc);
1668 }
1669
1670 /*
1671 * try to allocate an existing free inode from the ag.
1672 */
1673 return (diAllocIno(imap, agno, ip));
1674 }
1675
1676
1677 /*
1678 * NAME: diAllocAny(imap,agno,dir,iap)
1679 *
1680 * FUNCTION: allocate a disk inode from any other allocation group.
1681 *
1682 * this routine is called when an allocation attempt within
1683 * the primary allocation group has failed. it attempts to
1684 * allocate an inode from any allocation group other than the
1685 * specified primary group.
1686 *
1687 * PARAMETERS:
1688 * imap - pointer to inode map control structure.
1689 * agno - primary allocation group (to avoid).
1690 * dir - 'true' if the new disk inode is for a directory.
1691 * ip - pointer to a new inode to be filled in on successful return
1692 * with the disk inode number allocated, its extent address
1693 * and the start of the ag.
1694 *
1695 * RETURN VALUES:
1696 * 0 - success.
1697 * -ENOSPC - insufficient disk resources.
1698 * -EIO - i/o error.
1699 */
1700 static int
1701 diAllocAny(struct inomap * imap, int agno, bool dir, struct inode *ip)
1702 {
1703 int ag, rc;
1704 int maxag = JFS_SBI(imap->im_ipimap->i_sb)->bmap->db_maxag;
1705
1706
1707 /* try to allocate from the ags following agno up to
1708 * the maximum ag number.
1709 */
1710 for (ag = agno + 1; ag <= maxag; ag++) {
1711 AG_LOCK(imap, ag);
1712
1713 rc = diAllocAG(imap, ag, dir, ip);
1714
1715 AG_UNLOCK(imap, ag);
1716
1717 if (rc != -ENOSPC)
1718 return (rc);
1719 }
1720
1721 /* try to allocate from the ags in front of agno.
1722 */
1723 for (ag = 0; ag < agno; ag++) {
1724 AG_LOCK(imap, ag);
1725
1726 rc = diAllocAG(imap, ag, dir, ip);
1727
1728 AG_UNLOCK(imap, ag);
1729
1730 if (rc != -ENOSPC)
1731 return (rc);
1732 }
1733
1734 /* no free disk inodes.
1735 */
1736 return -ENOSPC;
1737 }
1738
1739
1740 /*
1741 * NAME: diAllocIno(imap,agno,ip)
1742 *
1743 * FUNCTION: allocate a disk inode from the allocation group's free
1744 * inode list, returning an error if this free list is
1745 * empty (i.e. no iags on the list).
1746 *
1747 * allocation occurs from the first iag on the list using
1748 * the iag's free inode summary map to find the leftmost
1749 * free inode in the iag.
1750 *
1751 * PRE CONDITION: Already have AG lock for this AG.
1752 *
1753 * PARAMETERS:
1754 * imap - pointer to inode map control structure.
1755 * agno - allocation group.
1756 * ip - pointer to new inode to be filled in on successful return
1757 * with the disk inode number allocated, its extent address
1758 * and the start of the ag.
1759 *
1760 * RETURN VALUES:
1761 * 0 - success.
1762 * -ENOSPC - insufficient disk resources.
1763 * -EIO - i/o error.
1764 */
1765 static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
1766 {
1767 int iagno, ino, rc, rem, extno, sword;
1768 struct metapage *mp;
1769 struct iag *iagp;
1770
1771 /* check if there are iags on the ag's free inode list.
1772 */
1773 if ((iagno = imap->im_agctl[agno].inofree) < 0)
1774 return -ENOSPC;
1775
1776 /* obtain read lock on imap inode */
1777 IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
1778
1779 /* read the iag at the head of the list.
1780 */
1781 if ((rc = diIAGRead(imap, iagno, &mp))) {
1782 IREAD_UNLOCK(imap->im_ipimap);
1783 return (rc);
1784 }
1785 iagp = (struct iag *) mp->data;
1786
1787 /* better be free inodes in this iag if it is on the
1788 * list.
1789 */
1790 if (!iagp->nfreeinos) {
1791 IREAD_UNLOCK(imap->im_ipimap);
1792 release_metapage(mp);
1793 jfs_error(ip->i_sb, "nfreeinos = 0, but iag on freelist\n");
1794 return -EIO;
1795 }
1796
1797 /* scan the free inode summary map to find an extent
1798 * with free inodes.
1799 */
1800 for (sword = 0;; sword++) {
1801 if (sword >= SMAPSZ) {
1802 IREAD_UNLOCK(imap->im_ipimap);
1803 release_metapage(mp);
1804 jfs_error(ip->i_sb,
1805 "free inode not found in summary map\n");
1806 return -EIO;
1807 }
1808
1809 if (~iagp->inosmap[sword])
1810 break;
1811 }
1812
1813 /* found an extent with free inodes. determine
1814 * the extent number.
1815 */
1816 rem = diFindFree(le32_to_cpu(iagp->inosmap[sword]), 0);
1817 if (rem >= EXTSPERSUM) {
1818 IREAD_UNLOCK(imap->im_ipimap);
1819 release_metapage(mp);
1820 jfs_error(ip->i_sb, "no free extent found\n");
1821 return -EIO;
1822 }
1823 extno = (sword << L2EXTSPERSUM) + rem;
1824
1825 /* find the first free inode in the extent.
1826 */
1827 rem = diFindFree(le32_to_cpu(iagp->wmap[extno]), 0);
1828 if (rem >= INOSPEREXT) {
1829 IREAD_UNLOCK(imap->im_ipimap);
1830 release_metapage(mp);
1831 jfs_error(ip->i_sb, "free inode not found\n");
1832 return -EIO;
1833 }
1834
1835 /* compute the inode number within the iag.
1836 */
1837 ino = (extno << L2INOSPEREXT) + rem;
1838
1839 /* allocate the inode.
1840 */
1841 rc = diAllocBit(imap, iagp, ino);
1842 IREAD_UNLOCK(imap->im_ipimap);
1843 if (rc) {
1844 release_metapage(mp);
1845 return (rc);
1846 }
1847
1848 /* set the results of the allocation and write the iag.
1849 */
1850 diInitInode(ip, iagno, ino, extno, iagp);
1851 write_metapage(mp);
1852
1853 return (0);
1854 }
1855
1856
1857 /*
1858 * NAME: diAllocExt(imap,agno,ip)
1859 *
1860 * FUNCTION: add a new extent of free inodes to an iag, allocating
1861 * an inode from this extent to satisfy the current allocation
1862 * request.
1863 *
1864 * this routine first tries to find an existing iag with free
1865 * extents through the ag free extent list. if list is not
1866 * empty, the head of the list will be selected as the home
1867 * of the new extent of free inodes. otherwise (the list is
1868 * empty), a new iag will be allocated for the ag to contain
1869 * the extent.
1870 *
1871 * once an iag has been selected, the free extent summary map
1872 * is used to locate a free extent within the iag and diNewExt()
1873 * is called to initialize the extent, with initialization
1874 * including the allocation of the first inode of the extent
1875 * for the purpose of satisfying this request.
1876 *
1877 * PARAMETERS:
1878 * imap - pointer to inode map control structure.
1879 * agno - allocation group number.
1880 * ip - pointer to new inode to be filled in on successful return
1881 * with the disk inode number allocated, its extent address
1882 * and the start of the ag.
1883 *
1884 * RETURN VALUES:
1885 * 0 - success.
1886 * -ENOSPC - insufficient disk resources.
1887 * -EIO - i/o error.
1888 */
1889 static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
1890 {
1891 int rem, iagno, sword, extno, rc;
1892 struct metapage *mp;
1893 struct iag *iagp;
1894
1895 /* check if the ag has any iags with free extents. if not,
1896 * allocate a new iag for the ag.
1897 */
1898 if ((iagno = imap->im_agctl[agno].extfree) < 0) {
1899 /* If successful, diNewIAG will obtain the read lock on the
1900 * imap inode.
1901 */
1902 if ((rc = diNewIAG(imap, &iagno, agno, &mp))) {
1903 return (rc);
1904 }
1905 iagp = (struct iag *) mp->data;
1906
1907 		/* set the ag number if this is a brand new iag
1908 */
1909 iagp->agstart =
1910 cpu_to_le64(AGTOBLK(agno, imap->im_ipimap));
1911 } else {
1912 /* read the iag.
1913 */
1914 IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
1915 if ((rc = diIAGRead(imap, iagno, &mp))) {
1916 IREAD_UNLOCK(imap->im_ipimap);
1917 jfs_error(ip->i_sb, "error reading iag\n");
1918 return rc;
1919 }
1920 iagp = (struct iag *) mp->data;
1921 }
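	/* in either case we now hold the iag's metapage buffer and the
	 * read lock on the imap inode: diNewIAG() returns with the read
	 * lock held, and the else branch took it explicitly above.
	 */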
1922
1923 /* using the free extent summary map, find a free extent.
1924 */
1925 for (sword = 0;; sword++) {
1926 if (sword >= SMAPSZ) {
1927 release_metapage(mp);
1928 IREAD_UNLOCK(imap->im_ipimap);
1929 jfs_error(ip->i_sb, "free ext summary map not found\n");
1930 return -EIO;
1931 }
1932 if (~iagp->extsmap[sword])
1933 break;
1934 }
1935
1936 /* determine the extent number of the free extent.
1937 */
1938 rem = diFindFree(le32_to_cpu(iagp->extsmap[sword]), 0);
1939 if (rem >= EXTSPERSUM) {
1940 release_metapage(mp);
1941 IREAD_UNLOCK(imap->im_ipimap);
1942 jfs_error(ip->i_sb, "free extent not found\n");
1943 return -EIO;
1944 }
1945 extno = (sword << L2EXTSPERSUM) + rem;
1946
1947 /* initialize the new extent.
1948 */
1949 rc = diNewExt(imap, iagp, extno);
1950 IREAD_UNLOCK(imap->im_ipimap);
1951 if (rc) {
1952 /* something bad happened. if a new iag was allocated,
1953 * place it back on the inode map's iag free list, and
1954 * clear the ag number information.
1955 */
1956 if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
1957 IAGFREE_LOCK(imap);
1958 iagp->iagfree = cpu_to_le32(imap->im_freeiag);
1959 imap->im_freeiag = iagno;
1960 IAGFREE_UNLOCK(imap);
1961 }
1962 write_metapage(mp);
1963 return (rc);
1964 }
1965
1966 /* set the results of the allocation and write the iag.
1967 */
1968 diInitInode(ip, iagno, extno << L2INOSPEREXT, extno, iagp);
1969
1970 write_metapage(mp);
1971
1972 return (0);
1973 }
1974
1975
1976 /*
1977 * NAME: diAllocBit(imap,iagp,ino)
1978 *
1979 * FUNCTION: allocate a backed inode from an iag.
1980 *
1981 * this routine performs the mechanics of allocating a
1982 * specified inode from a backed extent.
1983 *
1984 * if the inode to be allocated represents the last free
1985 * inode within the iag, the iag will be removed from the
1986 * ag free inode list.
1987 *
1988 * a careful update approach is used to provide consistency
1989 * in the face of updates to multiple buffers. under this
1990 * approach, all required buffers are obtained before making
1991  *		any updates and are held until all updates are complete.
1992 *
1993 * PRE CONDITION: Already have buffer lock on iagp. Already have AG lock on
1994 * this AG. Must have read lock on imap inode.
1995 *
1996 * PARAMETERS:
1997 * imap - pointer to inode map control structure.
1998 * iagp - pointer to iag.
1999 * ino - inode number to be allocated within the iag.
2000 *
2001 * RETURN VALUES:
2002 * 0 - success.
2003 * -ENOSPC - insufficient disk resources.
2004 * -EIO - i/o error.
2005 */
2006 static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
2007 {
2008 int extno, bitno, agno, sword, rc;
2009 struct metapage *amp = NULL, *bmp = NULL;
2010 struct iag *aiagp = NULL, *biagp = NULL;
2011 u32 mask;
2012
2013 /* check if this is the last free inode within the iag.
2014 * if so, it will have to be removed from the ag free
2015 * inode list, so get the iags preceding and following
2016 * it on the list.
2017 */
2018 if (iagp->nfreeinos == cpu_to_le32(1)) {
2019 if ((int) le32_to_cpu(iagp->inofreefwd) >= 0) {
2020 if ((rc =
2021 diIAGRead(imap, le32_to_cpu(iagp->inofreefwd),
2022 			       &amp)))
2023 return (rc);
2024 aiagp = (struct iag *) amp->data;
2025 }
2026
2027 if ((int) le32_to_cpu(iagp->inofreeback) >= 0) {
2028 if ((rc =
2029 diIAGRead(imap,
2030 le32_to_cpu(iagp->inofreeback),
2031 &bmp))) {
2032 if (amp)
2033 release_metapage(amp);
2034 return (rc);
2035 }
2036 biagp = (struct iag *) bmp->data;
2037 }
2038 }
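	/* both neighbour iags (if any) are now pinned, so the list
	 * unlinking below completes without further reads that could
	 * fail partway through (the careful update approach above).
	 */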
2039
2040 /* get the ag number, extent number, inode number within
2041 * the extent.
2042 */
2043 agno = BLKTOAG(le64_to_cpu(iagp->agstart), JFS_SBI(imap->im_ipimap->i_sb));
2044 extno = ino >> L2INOSPEREXT;
2045 bitno = ino & (INOSPEREXT - 1);
2046
2047 /* compute the mask for setting the map.
2048 */
2049 mask = HIGHORDER >> bitno;
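	/* e.g. ino == 70 gives extno == 2, bitno == 6 and, assuming
	 * HIGHORDER is the most significant bit (0x80000000), a mask
	 * of 0x02000000 (a sketch for 32 inodes per extent).
	 */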
2050
2051 /* the inode should be free and backed.
2052 */
2053 if (((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) ||
2054 ((le32_to_cpu(iagp->wmap[extno]) & mask) != 0) ||
2055 (addressPXD(&iagp->inoext[extno]) == 0)) {
2056 if (amp)
2057 release_metapage(amp);
2058 if (bmp)
2059 release_metapage(bmp);
2060
2061 jfs_error(imap->im_ipimap->i_sb, "iag inconsistent\n");
2062 return -EIO;
2063 }
2064
2065 /* mark the inode as allocated in the working map.
2066 */
2067 iagp->wmap[extno] |= cpu_to_le32(mask);
2068
2069 /* check if all inodes within the extent are now
2070 * allocated. if so, update the free inode summary
2071 * map to reflect this.
2072 */
2073 if (iagp->wmap[extno] == cpu_to_le32(ONES)) {
2074 sword = extno >> L2EXTSPERSUM;
2075 bitno = extno & (EXTSPERSUM - 1);
2076 iagp->inosmap[sword] |= cpu_to_le32(HIGHORDER >> bitno);
2077 }
2078
2079 /* if this was the last free inode in the iag, remove the
2080 * iag from the ag free inode list.
2081 */
2082 if (iagp->nfreeinos == cpu_to_le32(1)) {
2083 if (amp) {
2084 aiagp->inofreeback = iagp->inofreeback;
2085 write_metapage(amp);
2086 }
2087
2088 if (bmp) {
2089 biagp->inofreefwd = iagp->inofreefwd;
2090 write_metapage(bmp);
2091 } else {
2092 imap->im_agctl[agno].inofree =
2093 le32_to_cpu(iagp->inofreefwd);
2094 }
2095 iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
2096 }
2097
2098 /* update the free inode count at the iag, ag, inode
2099 * map levels.
2100 */
2101 le32_add_cpu(&iagp->nfreeinos, -1);
2102 imap->im_agctl[agno].numfree -= 1;
2103 atomic_dec(&imap->im_numfree);
2104
2105 return (0);
2106 }
2107
2108
2109 /*
2110 * NAME: diNewExt(imap,iagp,extno)
2111 *
2112 * FUNCTION: initialize a new extent of inodes for an iag, allocating
2113 * the first inode of the extent for use for the current
2114 * allocation request.
2115 *
2116 * disk resources are allocated for the new extent of inodes
2117 * and the inodes themselves are initialized to reflect their
2118 * existence within the extent (i.e. their inode numbers and
2119 * inode extent addresses are set) and their initial state
2120 * (mode and link count are set to zero).
2121 *
2122 * if the iag is new, it is not yet on an ag extent free list
2123 * but will now be placed on this list.
2124 *
2125 * if the allocation of the new extent causes the iag to
2126 * have no free extent, the iag will be removed from the
2127 * ag extent free list.
2128 *
2129 * if the iag has no free backed inodes, it will be placed
2130 * on the ag free inode list, since the addition of the new
2131 * extent will now cause it to have free inodes.
2132 *
2133 * a careful update approach is used to provide consistency
2134 * (i.e. list consistency) in the face of updates to multiple
2135 * buffers. under this approach, all required buffers are
2136 * obtained before making any updates and are held until all
2137 * updates are complete.
2138 *
2139 * PRE CONDITION: Already have buffer lock on iagp. Already have AG lock on
2140 * this AG. Must have read lock on imap inode.
2141 *
2142 * PARAMETERS:
2143 * imap - pointer to inode map control structure.
2144 * iagp - pointer to iag.
2145 * extno - extent number.
2146 *
2147 * RETURN VALUES:
2148 * 0 - success.
2149 * -ENOSPC - insufficient disk resources.
2150 * -EIO - i/o error.
2151 */
2152 static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
2153 {
2154 int agno, iagno, fwd, back, freei = 0, sword, rc;
2155 struct iag *aiagp = NULL, *biagp = NULL, *ciagp = NULL;
2156 struct metapage *amp, *bmp, *cmp, *dmp;
2157 struct inode *ipimap;
2158 s64 blkno, hint;
2159 int i, j;
2160 u32 mask;
2161 ino_t ino;
2162 struct dinode *dp;
2163 struct jfs_sb_info *sbi;
2164
2165 /* better have free extents.
2166 */
2167 if (!iagp->nfreeexts) {
2168 jfs_error(imap->im_ipimap->i_sb, "no free extents\n");
2169 return -EIO;
2170 }
2171
2172 /* get the inode map inode.
2173 */
2174 ipimap = imap->im_ipimap;
2175 sbi = JFS_SBI(ipimap->i_sb);
2176
2177 amp = bmp = cmp = NULL;
2178
2179 /* get the ag and iag numbers for this iag.
2180 */
2181 agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
2182 if (agno >= MAXAG || agno < 0)
2183 return -EIO;
2184
2185 iagno = le32_to_cpu(iagp->iagnum);
2186
2187 /* check if this is the last free extent within the
2188 * iag. if so, the iag must be removed from the ag
2189 * free extent list, so get the iags preceding and
2190 * following the iag on this list.
2191 */
2192 if (iagp->nfreeexts == cpu_to_le32(1)) {
2193 if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
2194 			if ((rc = diIAGRead(imap, fwd, &amp)))
2195 return (rc);
2196 aiagp = (struct iag *) amp->data;
2197 }
2198
2199 if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
2200 if ((rc = diIAGRead(imap, back, &bmp)))
2201 goto error_out;
2202 biagp = (struct iag *) bmp->data;
2203 }
2204 } else {
2205 /* the iag has free extents. if all extents are free
2206 * (as is the case for a newly allocated iag), the iag
2207 * must be added to the ag free extent list, so get
2208 * the iag at the head of the list in preparation for
2209 * adding this iag to this list.
2210 */
2211 fwd = back = -1;
2212 if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
2213 if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
2214 				if ((rc = diIAGRead(imap, fwd, &amp)))
2215 goto error_out;
2216 aiagp = (struct iag *) amp->data;
2217 }
2218 }
2219 }
2220
2221 /* check if the iag has no free inodes. if so, the iag
2222 * will have to be added to the ag free inode list, so get
2223 * the iag at the head of the list in preparation for
2224 * adding this iag to this list. in doing this, we must
2225 * check if we already have the iag at the head of
2226 * the list in hand.
2227 */
2228 if (iagp->nfreeinos == 0) {
2229 freei = imap->im_agctl[agno].inofree;
2230
2231 if (freei >= 0) {
2232 if (freei == fwd) {
2233 ciagp = aiagp;
2234 } else if (freei == back) {
2235 ciagp = biagp;
2236 } else {
2237 if ((rc = diIAGRead(imap, freei, &cmp)))
2238 goto error_out;
2239 ciagp = (struct iag *) cmp->data;
2240 }
2241 if (ciagp == NULL) {
2242 jfs_error(imap->im_ipimap->i_sb,
2243 "ciagp == NULL\n");
2244 rc = -EIO;
2245 goto error_out;
2246 }
2247 }
2248 }
2249
2250 /* allocate disk space for the inode extent.
2251 */
2252 if ((extno == 0) || (addressPXD(&iagp->inoext[extno - 1]) == 0))
2253 hint = ((s64) agno << sbi->bmap->db_agl2size) - 1;
2254 else
2255 hint = addressPXD(&iagp->inoext[extno - 1]) +
2256 lengthPXD(&iagp->inoext[extno - 1]) - 1;
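	/* the hint is the last block of the previous inode extent, or
	 * the block just before the ag's first block when there is no
	 * previous extent, so dbAlloc() tries to place the new extent
	 * next to its neighbour (or at the front of the ag).
	 */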
2257
2258 if ((rc = dbAlloc(ipimap, hint, (s64) imap->im_nbperiext, &blkno)))
2259 goto error_out;
2260
2261 /* compute the inode number of the first inode within the
2262 * extent.
2263 */
2264 ino = (iagno << L2INOSPERIAG) + (extno << L2INOSPEREXT);
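	/* e.g. iagno == 2, extno == 5 gives ino == 2*4096 + 5*32 == 8352
	 * (a sketch assuming 4096 inodes per iag and 32 per extent).
	 */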
2265
2266 /* initialize the inodes within the newly allocated extent a
2267 * page at a time.
2268 */
2269 for (i = 0; i < imap->im_nbperiext; i += sbi->nbperpage) {
2270 /* get a buffer for this page of disk inodes.
2271 */
2272 dmp = get_metapage(ipimap, blkno + i, PSIZE, 1);
2273 if (dmp == NULL) {
2274 rc = -EIO;
2275 goto error_out;
2276 }
2277 dp = (struct dinode *) dmp->data;
2278
2279 /* initialize the inode number, mode, link count and
2280 * inode extent address.
2281 */
2282 for (j = 0; j < INOSPERPAGE; j++, dp++, ino++) {
2283 dp->di_inostamp = cpu_to_le32(sbi->inostamp);
2284 dp->di_number = cpu_to_le32(ino);
2285 dp->di_fileset = cpu_to_le32(FILESYSTEM_I);
2286 dp->di_mode = 0;
2287 dp->di_nlink = 0;
2288 PXDaddress(&(dp->di_ixpxd), blkno);
2289 PXDlength(&(dp->di_ixpxd), imap->im_nbperiext);
2290 }
2291 write_metapage(dmp);
2292 }
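	/* every disk inode in the new extent now carries its own inode
	 * number plus the address and length of the extent that holds
	 * it (di_ixpxd), with mode and link count cleared.
	 */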
2293
2294 /* if this is the last free extent within the iag, remove the
2295 * iag from the ag free extent list.
2296 */
2297 if (iagp->nfreeexts == cpu_to_le32(1)) {
2298 if (fwd >= 0)
2299 aiagp->extfreeback = iagp->extfreeback;
2300
2301 if (back >= 0)
2302 biagp->extfreefwd = iagp->extfreefwd;
2303 else
2304 imap->im_agctl[agno].extfree =
2305 le32_to_cpu(iagp->extfreefwd);
2306
2307 iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
2308 } else {
2309 /* if the iag has all free extents (newly allocated iag),
2310 * add the iag to the ag free extent list.
2311 */
2312 if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
2313 if (fwd >= 0)
2314 aiagp->extfreeback = cpu_to_le32(iagno);
2315
2316 iagp->extfreefwd = cpu_to_le32(fwd);
2317 iagp->extfreeback = cpu_to_le32(-1);
2318 imap->im_agctl[agno].extfree = iagno;
2319 }
2320 }
2321
2322 /* if the iag has no free inodes, add the iag to the
2323 * ag free inode list.
2324 */
2325 if (iagp->nfreeinos == 0) {
2326 if (freei >= 0)
2327 ciagp->inofreeback = cpu_to_le32(iagno);
2328
2329 iagp->inofreefwd =
2330 cpu_to_le32(imap->im_agctl[agno].inofree);
2331 iagp->inofreeback = cpu_to_le32(-1);
2332 imap->im_agctl[agno].inofree = iagno;
2333 }
2334
2335 /* initialize the extent descriptor of the extent. */
2336 PXDlength(&iagp->inoext[extno], imap->im_nbperiext);
2337 PXDaddress(&iagp->inoext[extno], blkno);
2338
2339 /* initialize the working and persistent map of the extent.
2340 * the working map will be initialized such that
2341 * it indicates the first inode of the extent is allocated.
2342 */
2343 iagp->wmap[extno] = cpu_to_le32(HIGHORDER);
2344 iagp->pmap[extno] = 0;
2345
2346 /* update the free inode and free extent summary maps
2347 * for the extent to indicate the extent has free inodes
2348 * and no longer represents a free extent.
2349 */
2350 sword = extno >> L2EXTSPERSUM;
2351 mask = HIGHORDER >> (extno & (EXTSPERSUM - 1));
2352 iagp->extsmap[sword] |= cpu_to_le32(mask);
2353 iagp->inosmap[sword] &= cpu_to_le32(~mask);
2354
2355 /* update the free inode and free extent counts for the
2356 * iag.
2357 */
2358 le32_add_cpu(&iagp->nfreeinos, (INOSPEREXT - 1));
2359 le32_add_cpu(&iagp->nfreeexts, -1);
2360
2361 /* update the free and backed inode counts for the ag.
2362 */
2363 imap->im_agctl[agno].numfree += (INOSPEREXT - 1);
2364 imap->im_agctl[agno].numinos += INOSPEREXT;
2365
2366 /* update the free and backed inode counts for the inode map.
2367 */
2368 atomic_add(INOSPEREXT - 1, &imap->im_numfree);
2369 atomic_add(INOSPEREXT, &imap->im_numinos);
2370
2371 /* write the iags.
2372 */
2373 if (amp)
2374 write_metapage(amp);
2375 if (bmp)
2376 write_metapage(bmp);
2377 if (cmp)
2378 write_metapage(cmp);
2379
2380 return (0);
2381
2382 error_out:
2383
2384 /* release the iags.
2385 */
2386 if (amp)
2387 release_metapage(amp);
2388 if (bmp)
2389 release_metapage(bmp);
2390 if (cmp)
2391 release_metapage(cmp);
2392
2393 return (rc);
2394 }
2395
2396
2397 /*
2398 * NAME: diNewIAG(imap,iagnop,agno)
2399 *
2400 * FUNCTION: allocate a new iag for an allocation group.
2401 *
2402 * first tries to allocate the iag from the inode map
2403 * iagfree list:
2404 * if the list has free iags, the head of the list is removed
2405 * and returned to satisfy the request.
2406 * if the inode map's iag free list is empty, the inode map
2407 * is extended to hold a new iag. this new iag is initialized
2408 * and returned to satisfy the request.
2409 *
2410 * PARAMETERS:
2411 * imap - pointer to inode map control structure.
2412 * iagnop - pointer to an iag number set with the number of the
2413 * newly allocated iag upon successful return.
2414 * agno - allocation group number.
2415  *	mpp	- buffer pointer to be filled in with the new IAG's buffer
2416 *
2417 * RETURN VALUES:
2418 * 0 - success.
2419 * -ENOSPC - insufficient disk resources.
2420 * -EIO - i/o error.
2421 *
2422 * serialization:
2423 * AG lock held on entry/exit;
2424 * write lock on the map is held inside;
2425 * read lock on the map is held on successful completion;
2426 *
2427 * note: new iag transaction:
2428 * . synchronously write iag;
2429 * . write log of xtree and inode of imap;
2430 * . commit;
2431 * . synchronous write of xtree (right to left, bottom to top);
2432 * . at start of logredo(): init in-memory imap with one additional iag page;
2433 * . at end of logredo(): re-read imap inode to determine
2434 * new imap size;
2435 */
2436 static int
2437 diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
2438 {
2439 int rc;
2440 int iagno, i, xlen;
2441 struct inode *ipimap;
2442 struct super_block *sb;
2443 struct jfs_sb_info *sbi;
2444 struct metapage *mp;
2445 struct iag *iagp;
2446 s64 xaddr = 0;
2447 s64 blkno;
2448 tid_t tid;
2449 struct inode *iplist[1];
2450
2451 /* pick up pointers to the inode map and mount inodes */
2452 ipimap = imap->im_ipimap;
2453 sb = ipimap->i_sb;
2454 sbi = JFS_SBI(sb);
2455
2456 /* acquire the free iag lock */
2457 IAGFREE_LOCK(imap);
2458
2459 /* if there are any iags on the inode map free iag list,
2460 * allocate the iag from the head of the list.
2461 */
2462 if (imap->im_freeiag >= 0) {
2463 /* pick up the iag number at the head of the list */
2464 iagno = imap->im_freeiag;
2465
2466 /* determine the logical block number of the iag */
2467 blkno = IAGTOLBLK(iagno, sbi->l2nbperpage);
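		/* the iag pages follow the map's control page, so this
		 * maps the iag number to the logical block of its page
		 * within the inode map file.
		 */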
2468 } else {
2469 		/* no free iags. the inode map will have to be extended
2470 * to include a new iag.
2471 */
2472
2473 /* acquire inode map lock */
2474 IWRITE_LOCK(ipimap, RDWRLOCK_IMAP);
2475
2476 if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) {
2477 IWRITE_UNLOCK(ipimap);
2478 IAGFREE_UNLOCK(imap);
2479 jfs_error(imap->im_ipimap->i_sb,
2480 "ipimap->i_size is wrong\n");
2481 return -EIO;
2482 }
2483
2484
2485 /* get the next available iag number */
2486 iagno = imap->im_nextiag;
2487
2488 /* make sure that we have not exceeded the maximum inode
2489 * number limit.
2490 */
2491 if (iagno > (MAXIAGS - 1)) {
2492 /* release the inode map lock */
2493 IWRITE_UNLOCK(ipimap);
2494
2495 rc = -ENOSPC;
2496 goto out;
2497 }
2498
2499 /*
2500 * synchronously append new iag page.
2501 */
2502 /* determine the logical address of iag page to append */
2503 blkno = IAGTOLBLK(iagno, sbi->l2nbperpage);
2504
2505 /* Allocate extent for new iag page */
2506 xlen = sbi->nbperpage;
2507 if ((rc = dbAlloc(ipimap, 0, (s64) xlen, &xaddr))) {
2508 /* release the inode map lock */
2509 IWRITE_UNLOCK(ipimap);
2510
2511 goto out;
2512 }
2513
2514 /*
2515 * start transaction of update of the inode map
2516 * addressing structure pointing to the new iag page;
2517 */
2518 tid = txBegin(sb, COMMIT_FORCE);
2519 mutex_lock(&JFS_IP(ipimap)->commit_mutex);
2520
2521 /* update the inode map addressing structure to point to it */
2522 if ((rc =
2523 xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) {
2524 txEnd(tid);
2525 mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
2526 /* Free the blocks allocated for the iag since it was
2527 * not successfully added to the inode map
2528 */
2529 dbFree(ipimap, xaddr, (s64) xlen);
2530
2531 /* release the inode map lock */
2532 IWRITE_UNLOCK(ipimap);
2533
2534 goto out;
2535 }
2536
2537 /* update the inode map's inode to reflect the extension */
2538 ipimap->i_size += PSIZE;
2539 inode_add_bytes(ipimap, PSIZE);
2540
2541 /* assign a buffer for the page */
2542 mp = get_metapage(ipimap, blkno, PSIZE, 0);
2543 if (!mp) {
2544 /*
2545 * This is very unlikely since we just created the
2546 * extent, but let's try to handle it correctly
2547 */
2548 xtTruncate(tid, ipimap, ipimap->i_size - PSIZE,
2549 COMMIT_PWMAP);
2550
2551 txAbort(tid, 0);
2552 txEnd(tid);
2553 mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
2554
2555 /* release the inode map lock */
2556 IWRITE_UNLOCK(ipimap);
2557
2558 rc = -EIO;
2559 goto out;
2560 }
2561 iagp = (struct iag *) mp->data;
2562
2563 /* init the iag */
2564 memset(iagp, 0, sizeof(struct iag));
2565 iagp->iagnum = cpu_to_le32(iagno);
2566 iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
2567 iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
2568 iagp->iagfree = cpu_to_le32(-1);
2569 iagp->nfreeinos = 0;
2570 iagp->nfreeexts = cpu_to_le32(EXTSPERIAG);
2571
2572 /* initialize the free inode summary map (free extent
2573 		 * summary map initialization handled by the memset above).
2574 */
2575 for (i = 0; i < SMAPSZ; i++)
2576 iagp->inosmap[i] = cpu_to_le32(ONES);
2577
2578 /*
2579 * Write and sync the metapage
2580 */
2581 flush_metapage(mp);
2582
2583 /*
2584 * txCommit(COMMIT_FORCE) will synchronously write address
2585 * index pages and inode after commit in careful update order
2586 * of address index pages (right to left, bottom up);
2587 */
2588 iplist[0] = ipimap;
2589 rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
2590
2591 txEnd(tid);
2592 mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
2593
2594 duplicateIXtree(sb, blkno, xlen, &xaddr);
2595
2596 /* update the next available iag number */
2597 imap->im_nextiag += 1;
2598
2599 /* Add the iag to the iag free list so we don't lose the iag
2600 * if a failure happens now.
2601 */
2602 imap->im_freeiag = iagno;
2603
2604 /* Until we have logredo working, we want the imap inode &
2605 * control page to be up to date.
2606 */
2607 diSync(ipimap);
2608
2609 /* release the inode map lock */
2610 IWRITE_UNLOCK(ipimap);
2611 }
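	/* at this point a usable iag sits at the head of the free iag
	 * list: either it was already there, or the map was just grown
	 * by one page and the new iag initialized, force-committed and
	 * queued at im_freeiag above.  the common path below claims it.
	 */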
2612
2613 /* obtain read lock on map */
2614 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
2615
2616 /* read the iag */
2617 if ((rc = diIAGRead(imap, iagno, &mp))) {
2618 IREAD_UNLOCK(ipimap);
2619 rc = -EIO;
2620 goto out;
2621 }
2622 iagp = (struct iag *) mp->data;
2623
2624 /* remove the iag from the iag free list */
2625 imap->im_freeiag = le32_to_cpu(iagp->iagfree);
2626 iagp->iagfree = cpu_to_le32(-1);
2627
2628 /* set the return iag number and buffer pointer */
2629 *iagnop = iagno;
2630 *mpp = mp;
2631
2632 out:
2633 /* release the iag free lock */
2634 IAGFREE_UNLOCK(imap);
2635
2636 return (rc);
2637 }
2638
2639 /*
2640 * NAME: diIAGRead()
2641 *
2642 * FUNCTION: get the buffer for the specified iag within a fileset
2643 * or aggregate inode map.
2644 *
2645 * PARAMETERS:
2646 * imap - pointer to inode map control structure.
2647 * iagno - iag number.
2648  *	mpp	- pointer to buffer pointer to be filled in on successful
2649 * exit.
2650 *
2651 * SERIALIZATION:
2652 * must have read lock on imap inode
2653 * (When called by diExtendFS, the filesystem is quiesced, therefore
2654 * the read lock is unnecessary.)
2655 *
2656 * RETURN VALUES:
2657 * 0 - success.
2658 * -EIO - i/o error.
2659 */
2660 static int diIAGRead(struct inomap * imap, int iagno, struct metapage ** mpp)
2661 {
2662 struct inode *ipimap = imap->im_ipimap;
2663 s64 blkno;
2664
2665 /* compute the logical block number of the iag. */
2666 blkno = IAGTOLBLK(iagno, JFS_SBI(ipimap->i_sb)->l2nbperpage);
2667
2668 /* read the iag. */
2669 *mpp = read_metapage(ipimap, blkno, PSIZE, 0);
2670 if (*mpp == NULL) {
2671 return -EIO;
2672 }
2673
2674 return (0);
2675 }
2676
2677 /*
2678 * NAME: diFindFree()
2679 *
2680 * FUNCTION: find the first free bit in a word starting at
2681 * the specified bit position.
2682 *
2683 * PARAMETERS:
2684 * word - word to be examined.
2685 * start - starting bit position.
2686 *
2687 * RETURN VALUES:
2688 * bit position of first free bit in the word or 32 if
2689 * no free bits were found.
2690 */
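/* a sketch of the expected behaviour, assuming HIGHORDER is the most
 * significant bit: diFindFree(0xf0000000, 0) returns 4, the first
 * clear bit scanning from the high-order end of the word.
 */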
2691 static int diFindFree(u32 word, int start)
2692 {
2693 int bitno;
2694 assert(start < 32);
2695 /* scan the word for the first free bit. */
2696 for (word <<= start, bitno = start; bitno < 32;
2697 bitno++, word <<= 1) {
2698 if ((word & HIGHORDER) == 0)
2699 break;
2700 }
2701 return (bitno);
2702 }
2703
2704 /*
2705 * NAME: diUpdatePMap()
2706 *
2707 * FUNCTION: Update the persistent map in an IAG for the allocation or
2708 * freeing of the specified inode.
2709 *
2710 * PRE CONDITIONS: Working map has already been updated for allocate.
2711 *
2712 * PARAMETERS:
2713 * ipimap - Incore inode map inode
2714 * inum - Number of inode to mark in permanent map
2715  *	is_free	- If 'true', the inode should be marked freed; otherwise it
2716  *		  should be marked allocated.
2717 *
2718 * RETURN VALUES:
2719 * 0 for success
2720 */
2721 int
2722 diUpdatePMap(struct inode *ipimap,
2723 unsigned long inum, bool is_free, struct tblock * tblk)
2724 {
2725 int rc;
2726 struct iag *iagp;
2727 struct metapage *mp;
2728 int iagno, ino, extno, bitno;
2729 struct inomap *imap;
2730 u32 mask;
2731 struct jfs_log *log;
2732 int lsn, difft, diffp;
2733 unsigned long flags;
2734
2735 imap = JFS_IP(ipimap)->i_imap;
2736 /* get the iag number containing the inode */
2737 iagno = INOTOIAG(inum);
2738 /* make sure that the iag is contained within the map */
2739 if (iagno >= imap->im_nextiag) {
2740 jfs_error(ipimap->i_sb, "the iag is outside the map\n");
2741 return -EIO;
2742 }
2743 /* read the iag */
2744 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
2745 rc = diIAGRead(imap, iagno, &mp);
2746 IREAD_UNLOCK(ipimap);
2747 if (rc)
2748 return (rc);
2749 metapage_wait_for_io(mp);
2750 iagp = (struct iag *) mp->data;
2751 /* get the inode number and extent number of the inode within
2752 * the iag and the inode number within the extent.
2753 */
2754 ino = inum & (INOSPERIAG - 1);
2755 extno = ino >> L2INOSPEREXT;
2756 bitno = ino & (INOSPEREXT - 1);
2757 mask = HIGHORDER >> bitno;
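	/* e.g. inum == 8200 (a sketch assuming 4096 inodes per iag and
	 * 32 per extent): iagno == 2, ino == 8, extno == 0, bitno == 8,
	 * mask == 0x00800000.
	 */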
2758 /*
2759 * mark the inode free in persistent map:
2760 */
2761 if (is_free) {
2762 /* The inode should have been allocated both in working
2763 * map and in persistent map;
2764 		 * the inode will be freed from the working map when the last
2765 		 * reference is released;
2766 */
2767 if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
2768 jfs_error(ipimap->i_sb,
2769 "inode %ld not marked as allocated in wmap!\n",
2770 inum);
2771 }
2772 if (!(le32_to_cpu(iagp->pmap[extno]) & mask)) {
2773 jfs_error(ipimap->i_sb,
2774 "inode %ld not marked as allocated in pmap!\n",
2775 inum);
2776 }
2777 /* update the bitmap for the extent of the freed inode */
2778 iagp->pmap[extno] &= cpu_to_le32(~mask);
2779 }
2780 /*
2781 * mark the inode allocated in persistent map:
2782 */
2783 else {
2784 /* The inode should be already allocated in the working map
2785 * and should be free in persistent map;
2786 */
2787 if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
2788 release_metapage(mp);
2789 jfs_error(ipimap->i_sb,
2790 "the inode is not allocated in the working map\n");
2791 return -EIO;
2792 }
2793 if ((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) {
2794 release_metapage(mp);
2795 jfs_error(ipimap->i_sb,
2796 "the inode is not free in the persistent map\n");
2797 return -EIO;
2798 }
2799 /* update the bitmap for the extent of the allocated inode */
2800 iagp->pmap[extno] |= cpu_to_le32(mask);
2801 }
2802 /*
2803 * update iag lsn
2804 */
2805 lsn = tblk->lsn;
2806 log = JFS_SBI(tblk->sb)->log;
2807 LOGSYNC_LOCK(log, flags);
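	/* tie the iag page into the log sync list so it cannot reach
	 * disk ahead of the log records describing this update: an
	 * existing entry keeps the older lsn and takes the newer clsn,
	 * a fresh entry simply inherits the transaction's lsn/clsn.
	 */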
2808 if (mp->lsn != 0) {
2809 /* inherit older/smaller lsn */
2810 logdiff(difft, lsn, log);
2811 logdiff(diffp, mp->lsn, log);
2812 if (difft < diffp) {
2813 mp->lsn = lsn;
2814 /* move mp after tblock in logsync list */
2815 list_move(&mp->synclist, &tblk->synclist);
2816 }
2817 /* inherit younger/larger clsn */
2818 assert(mp->clsn);
2819 logdiff(difft, tblk->clsn, log);
2820 logdiff(diffp, mp->clsn, log);
2821 if (difft > diffp)
2822 mp->clsn = tblk->clsn;
2823 } else {
2824 mp->log = log;
2825 mp->lsn = lsn;
2826 /* insert mp after tblock in logsync list */
2827 log->count++;
2828 list_add(&mp->synclist, &tblk->synclist);
2829 mp->clsn = tblk->clsn;
2830 }
2831 LOGSYNC_UNLOCK(log, flags);
2832 write_metapage(mp);
2833 return (0);
2834 }
2835
2836 /*
2837 * diExtendFS()
2838 *
2839 * function: update imap for extendfs();
2840 *
2841  * note: the AG size has been increased such that each group of k old
2842  *	contiguous AGs is coalesced into one new AG;
2843 */
2844 int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
2845 {
2846 int rc, rcx = 0;
2847 struct inomap *imap = JFS_IP(ipimap)->i_imap;
2848 struct iag *iagp = NULL, *hiagp = NULL;
2849 struct bmap *mp = JFS_SBI(ipbmap->i_sb)->bmap;
2850 struct metapage *bp, *hbp;
2851 int i, n, head;
2852 int numinos, xnuminos = 0, xnumfree = 0;
2853 s64 agstart;
2854
2855 jfs_info("diExtendFS: nextiag:%d numinos:%d numfree:%d",
2856 imap->im_nextiag, atomic_read(&imap->im_numinos),
2857 atomic_read(&imap->im_numfree));
2858
2859 /*
2860 * reconstruct imap
2861 *
2862 	 * coalesce each run of k (= newAGSize/oldAGSize) contiguous old AGs;
2863 	 * i.e., (AGi, ..., AGj), where i = k*n and j = k*(n+1) - 1, become AGn;
2864 * note: new AG size = old AG size * (2**x).
2865 */
2866
2867 /* init per AG control information im_agctl[] */
2868 for (i = 0; i < MAXAG; i++) {
2869 imap->im_agctl[i].inofree = -1;
2870 imap->im_agctl[i].extfree = -1;
2871 imap->im_agctl[i].numinos = 0; /* number of backed inodes */
2872 imap->im_agctl[i].numfree = 0; /* number of free backed inodes */
2873 }
2874
2875 /*
2876 * process each iag page of the map.
2877 *
2878 * rebuild AG Free Inode List, AG Free Inode Extent List;
2879 */
2880 for (i = 0; i < imap->im_nextiag; i++) {
2881 if ((rc = diIAGRead(imap, i, &bp))) {
2882 rcx = rc;
2883 continue;
2884 }
2885 iagp = (struct iag *) bp->data;
2886 if (le32_to_cpu(iagp->iagnum) != i) {
2887 release_metapage(bp);
2888 jfs_error(ipimap->i_sb, "unexpected value of iagnum\n");
2889 return -EIO;
2890 }
2891
2892 /* leave free iag in the free iag list */
2893 if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
2894 release_metapage(bp);
2895 continue;
2896 }
2897
2898 agstart = le64_to_cpu(iagp->agstart);
2899 n = agstart >> mp->db_agl2size;
2900 iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size);
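		/* n is the new (larger) ag containing this iag's old
		 * agstart; realign agstart to that ag's first block.
		 */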
2901
2902 /* compute backed inodes */
2903 numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts))
2904 << L2INOSPEREXT;
2905 if (numinos > 0) {
2906 /* merge AG backed inodes */
2907 imap->im_agctl[n].numinos += numinos;
2908 xnuminos += numinos;
2909 }
2910
2911 /* if any backed free inodes, insert at AG free inode list */
2912 if ((int) le32_to_cpu(iagp->nfreeinos) > 0) {
2913 if ((head = imap->im_agctl[n].inofree) == -1) {
2914 iagp->inofreefwd = cpu_to_le32(-1);
2915 iagp->inofreeback = cpu_to_le32(-1);
2916 } else {
2917 if ((rc = diIAGRead(imap, head, &hbp))) {
2918 rcx = rc;
2919 goto nextiag;
2920 }
2921 hiagp = (struct iag *) hbp->data;
2922 hiagp->inofreeback = iagp->iagnum;
2923 iagp->inofreefwd = cpu_to_le32(head);
2924 iagp->inofreeback = cpu_to_le32(-1);
2925 write_metapage(hbp);
2926 }
2927
2928 imap->im_agctl[n].inofree =
2929 le32_to_cpu(iagp->iagnum);
2930
2931 /* merge AG backed free inodes */
2932 imap->im_agctl[n].numfree +=
2933 le32_to_cpu(iagp->nfreeinos);
2934 xnumfree += le32_to_cpu(iagp->nfreeinos);
2935 }
2936
2937 /* if any free extents, insert at AG free extent list */
2938 if (le32_to_cpu(iagp->nfreeexts) > 0) {
2939 if ((head = imap->im_agctl[n].extfree) == -1) {
2940 iagp->extfreefwd = cpu_to_le32(-1);
2941 iagp->extfreeback = cpu_to_le32(-1);
2942 } else {
2943 if ((rc = diIAGRead(imap, head, &hbp))) {
2944 rcx = rc;
2945 goto nextiag;
2946 }
2947 hiagp = (struct iag *) hbp->data;
2948 hiagp->extfreeback = iagp->iagnum;
2949 iagp->extfreefwd = cpu_to_le32(head);
2950 iagp->extfreeback = cpu_to_le32(-1);
2951 write_metapage(hbp);
2952 }
2953
2954 imap->im_agctl[n].extfree =
2955 le32_to_cpu(iagp->iagnum);
2956 }
2957
2958 nextiag:
2959 write_metapage(bp);
2960 }
2961
2962 if (xnuminos != atomic_read(&imap->im_numinos) ||
2963 xnumfree != atomic_read(&imap->im_numfree)) {
2964 jfs_error(ipimap->i_sb, "numinos or numfree incorrect\n");
2965 return -EIO;
2966 }
2967
2968 return rcx;
2969 }
2970
2971
2972 /*
2973 * duplicateIXtree()
2974 *
2975 * serialization: IWRITE_LOCK held on entry/exit
2976 *
2977 * note: shadow page with regular inode (rel.2);
2978 */
2979 static void duplicateIXtree(struct super_block *sb, s64 blkno,
2980 int xlen, s64 *xaddr)
2981 {
2982 struct jfs_superblock *j_sb;
2983 struct buffer_head *bh;
2984 struct inode *ip;
2985 tid_t tid;
2986
2987 /* if AIT2 ipmap2 is bad, do not try to update it */
2988 if (JFS_SBI(sb)->mntflag & JFS_BAD_SAIT) /* s_flag */
2989 return;
2990 ip = diReadSpecial(sb, FILESYSTEM_I, 1);
2991 if (ip == NULL) {
2992 JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
2993 if (readSuper(sb, &bh))
2994 return;
2995 j_sb = (struct jfs_superblock *)bh->b_data;
2996 j_sb->s_flag |= cpu_to_le32(JFS_BAD_SAIT);
2997
2998 mark_buffer_dirty(bh);
2999 sync_dirty_buffer(bh);
3000 brelse(bh);
3001 return;
3002 }
3003
3004 /* start transaction */
3005 tid = txBegin(sb, COMMIT_FORCE);
3006 /* update the inode map addressing structure to point to it */
3007 if (xtInsert(tid, ip, 0, blkno, xlen, xaddr, 0)) {
3008 JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
3009 txAbort(tid, 1);
3010 goto cleanup;
3011
3012 }
3013 /* update the inode map's inode to reflect the extension */
3014 ip->i_size += PSIZE;
3015 inode_add_bytes(ip, PSIZE);
3016 txCommit(tid, 1, &ip, COMMIT_FORCE);
3017 cleanup:
3018 txEnd(tid);
3019 diFreeSpecial(ip);
3020 }
3021
3022 /*
3023 * NAME: copy_from_dinode()
3024 *
3025 * FUNCTION: Copies inode info from disk inode to in-memory inode
3026 *
3027 * RETURN VALUES:
3028 * 0 - success
3029 * -ENOMEM - insufficient memory
3030 */
3031 static int copy_from_dinode(struct dinode * dip, struct inode *ip)
3032 {
3033 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
3034 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
3035
3036 jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
3037 jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
3038 jfs_set_inode_flags(ip);
3039
3040 ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff;
3041 if (sbi->umask != -1) {
3042 ip->i_mode = (ip->i_mode & ~0777) | (0777 & ~sbi->umask);
3043 /* For directories, add x permission if r is allowed by umask */
3044 if (S_ISDIR(ip->i_mode)) {
3045 if (ip->i_mode & 0400)
3046 ip->i_mode |= 0100;
3047 if (ip->i_mode & 0040)
3048 ip->i_mode |= 0010;
3049 if (ip->i_mode & 0004)
3050 ip->i_mode |= 0001;
3051 }
3052 }
3053 set_nlink(ip, le32_to_cpu(dip->di_nlink));
3054
3055 jfs_ip->saved_uid = make_kuid(&init_user_ns, le32_to_cpu(dip->di_uid));
3056 if (!uid_valid(sbi->uid))
3057 ip->i_uid = jfs_ip->saved_uid;
3058 else {
3059 ip->i_uid = sbi->uid;
3060 }
3061
3062 jfs_ip->saved_gid = make_kgid(&init_user_ns, le32_to_cpu(dip->di_gid));
3063 if (!gid_valid(sbi->gid))
3064 ip->i_gid = jfs_ip->saved_gid;
3065 else {
3066 ip->i_gid = sbi->gid;
3067 }
3068
3069 ip->i_size = le64_to_cpu(dip->di_size);
3070 ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
3071 ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec);
3072 ip->i_mtime.tv_sec = le32_to_cpu(dip->di_mtime.tv_sec);
3073 ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
3074 ip->i_ctime.tv_sec = le32_to_cpu(dip->di_ctime.tv_sec);
3075 ip->i_ctime.tv_nsec = le32_to_cpu(dip->di_ctime.tv_nsec);
3076 ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
3077 ip->i_generation = le32_to_cpu(dip->di_gen);
3078
3079 jfs_ip->ixpxd = dip->di_ixpxd; /* in-memory pxd's are little-endian */
3080 jfs_ip->acl = dip->di_acl; /* as are dxd's */
3081 jfs_ip->ea = dip->di_ea;
3082 jfs_ip->next_index = le32_to_cpu(dip->di_next_index);
3083 jfs_ip->otime = le32_to_cpu(dip->di_otime.tv_sec);
3084 jfs_ip->acltype = le32_to_cpu(dip->di_acltype);
3085
3086 if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode)) {
3087 jfs_ip->dev = le32_to_cpu(dip->di_rdev);
3088 ip->i_rdev = new_decode_dev(jfs_ip->dev);
3089 }
3090
3091 if (S_ISDIR(ip->i_mode)) {
3092 memcpy(&jfs_ip->u.dir, &dip->u._dir, 384);
3093 } else if (S_ISREG(ip->i_mode) || S_ISLNK(ip->i_mode)) {
3094 memcpy(&jfs_ip->i_xtroot, &dip->di_xtroot, 288);
3095 } else
3096 memcpy(&jfs_ip->i_inline_ea, &dip->di_inlineea, 128);
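	/* the tail of the disk inode is type-dependent: a directory's
	 * btree root, a file's or symlink's xtree root, or the in-line
	 * EA area; the byte counts above mirror that on-disk layout.
	 */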
3097
3098 /* Zero the in-memory-only stuff */
3099 jfs_ip->cflag = 0;
3100 jfs_ip->btindex = 0;
3101 jfs_ip->btorder = 0;
3102 jfs_ip->bxflag = 0;
3103 jfs_ip->blid = 0;
3104 jfs_ip->atlhead = 0;
3105 jfs_ip->atltail = 0;
3106 jfs_ip->xtlid = 0;
3107 return (0);
3108 }
3109
3110 /*
3111 * NAME: copy_to_dinode()
3112 *
3113 * FUNCTION: Copies inode info from in-memory inode to disk inode
3114 */
3115 static void copy_to_dinode(struct dinode * dip, struct inode *ip)
3116 {
3117 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
3118 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
3119
3120 dip->di_fileset = cpu_to_le32(jfs_ip->fileset);
3121 dip->di_inostamp = cpu_to_le32(sbi->inostamp);
3122 dip->di_number = cpu_to_le32(ip->i_ino);
3123 dip->di_gen = cpu_to_le32(ip->i_generation);
3124 dip->di_size = cpu_to_le64(ip->i_size);
3125 dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
3126 dip->di_nlink = cpu_to_le32(ip->i_nlink);
3127 if (!uid_valid(sbi->uid))
3128 dip->di_uid = cpu_to_le32(i_uid_read(ip));
3129 else
3130 		dip->di_uid = cpu_to_le32(from_kuid(&init_user_ns,
3131 jfs_ip->saved_uid));
3132 if (!gid_valid(sbi->gid))
3133 dip->di_gid = cpu_to_le32(i_gid_read(ip));
3134 else
3135 dip->di_gid = cpu_to_le32(from_kgid(&init_user_ns,
3136 jfs_ip->saved_gid));
3137 /*
3138 * mode2 is only needed for storing the higher order bits.
3139 * Trust i_mode for the lower order ones
3140 */
3141 if (sbi->umask == -1)
3142 dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) |
3143 ip->i_mode);
3144 else /* Leave the original permissions alone */
3145 dip->di_mode = cpu_to_le32(jfs_ip->mode2);
3146
3147 dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec);
3148 dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec);
3149 dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec);
3150 dip->di_ctime.tv_nsec = cpu_to_le32(ip->i_ctime.tv_nsec);
3151 dip->di_mtime.tv_sec = cpu_to_le32(ip->i_mtime.tv_sec);
3152 dip->di_mtime.tv_nsec = cpu_to_le32(ip->i_mtime.tv_nsec);
3153 dip->di_ixpxd = jfs_ip->ixpxd; /* in-memory pxd's are little-endian */
3154 dip->di_acl = jfs_ip->acl; /* as are dxd's */
3155 dip->di_ea = jfs_ip->ea;
3156 dip->di_next_index = cpu_to_le32(jfs_ip->next_index);
3157 dip->di_otime.tv_sec = cpu_to_le32(jfs_ip->otime);
3158 dip->di_otime.tv_nsec = 0;
3159 dip->di_acltype = cpu_to_le32(jfs_ip->acltype);
3160 if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode))
3161 dip->di_rdev = cpu_to_le32(jfs_ip->dev);
3162 }
3163