Lines Matching +full:ot +full:- +full:level +full:- +full:select

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) International Business Machines Corp., 2000-2004
4 * Portions Copyright (C) Christoph Hellwig, 2001-2002
24 * transactions which wrote COMMIT records in the same in-memory
38 * careful-write (ping-pong) of last logpage to recover from crash
40 * detection of split (out-of-order) write of physical sectors
45 * lsn - 64-bit monotonically increasing integer vs
46 * 32-bit lspn and page eor.
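These matched lines come from the JFS journal manager, fs/jfs/jfs_logmgr.c in the Linux kernel. The lsn contrasted with the 32-bit lspn above is built from the current log page number plus the end-of-record offset (eor) within that page, as lmWriteRecord() does further down (lsn = (log->page << L2LOGPSIZE) + dstoffset). A minimal sketch of that composition, assuming the usual 4K JFS log page (L2LOGPSIZE = 12); the helper name is invented for this note:

#define L2LOGPSIZE	12	/* assumed: log2 of a 4K log page */

/* Compose a log address from a log page number and the end-of-record
 * offset (eor) inside that page, mirroring lmWriteRecord() and
 * lbmIODone() in the listing below.
 */
static inline int lsn_of(int page, int eor)
{
        return (page << L2LOGPSIZE) + eor;
}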
80 #define LOG_LOCK_INIT(log) mutex_init(&(log)->loglock)
81 #define LOG_LOCK(log) mutex_lock(&((log)->loglock))
82 #define LOG_UNLOCK(log) mutex_unlock(&((log)->loglock))
89 #define LOGGC_LOCK_INIT(log) spin_lock_init(&(log)->gclock)
90 #define LOGGC_LOCK(log) spin_lock_irq(&(log)->gclock)
91 #define LOGGC_UNLOCK(log) spin_unlock_irq(&(log)->gclock)
92 #define LOGGC_WAKEUP(tblk) wake_up_all(&(tblk)->gcwait)
148 #define lbmGC 0x0080 /* lbmIODone to perform post-GC processing
206 list_for_each_entry(sbi, &log->sb_list, log_list) { in write_special_inodes()
207 writer(sbi->ipbmap->i_mapping); in write_special_inodes()
208 writer(sbi->ipimap->i_mapping); in write_special_inodes()
209 writer(sbi->direct_inode->i_mapping); in write_special_inodes()
220 * RETURN: lsn - offset to the next log record to write (end-of-log);
221 * -1 - error;
238 /* log by (out-of-transaction) JFS ? */ in lmLog()
244 tlck->type & tlckBTROOT || (mp = tlck->mp) == NULL) in lmLog()
250 lsn = log->lsn; in lmLog()
257 if (mp->lsn == 0) { in lmLog()
258 mp->log = log; in lmLog()
259 mp->lsn = lsn; in lmLog()
260 log->count++; in lmLog()
263 list_add_tail(&mp->synclist, &log->synclist); in lmLog()
273 * B+-tree index of extent descriptors for block in lmLog()
284 if (tblk->lsn == 0) { in lmLog()
286 tblk->lsn = mp->lsn; in lmLog()
287 log->count++; in lmLog()
290 list_add(&tblk->synclist, &mp->synclist); in lmLog()
297 logdiff(diffp, mp->lsn, log); in lmLog()
298 logdiff(difft, tblk->lsn, log); in lmLog()
301 tblk->lsn = mp->lsn; in lmLog()
304 list_move(&tblk->synclist, &mp->synclist); in lmLog()
320 if (diffp >= log->nextsync) in lmLog()
323 /* update end-of-log lsn */ in lmLog()
324 log->lsn = lsn; in lmLog()
328 /* return end-of-log address */ in lmLog()
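The logdiff() used just above (defined in jfs_logmgr.h, not among the matched lines) measures how far an lsn has advanced past the last sync point within the circular log space. A sketch of the wrap-around distance it yields, reconstructed from how this listing uses it, assuming lsn and syncpt are byte offsets within a circular log of logsize bytes (see lmLogInit(): logsize = (size - 2) << L2LOGPSIZE):

/* Circular distance from the sync point to lsn (sketch only; the real
 * logdiff() macro lives in jfs_logmgr.h): a negative raw difference
 * means lsn has wrapped around the end of the log, so add logsize back.
 */
static inline int log_distance(int lsn, int syncpt, int logsize)
{
        int diff = lsn - syncpt;

        if (diff < 0)
                diff += logsize;
        return diff;
}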
337 * PARAMETER: cd - commit descriptor
339 * RETURN: end-of-log address
347 int lsn = 0; /* end-of-log address */ in lmWriteRecord()
351 int dstoffset; /* end-of-log offset in log page */ in lmWriteRecord()
353 caddr_t p; /* src meta-data page */ in lmWriteRecord()
367 bp = (struct lbuf *) log->bp; in lmWriteRecord()
368 lp = (struct logpage *) bp->l_ldata; in lmWriteRecord()
369 dstoffset = log->eor; in lmWriteRecord()
378 /* retrieve source meta-data page to log */ in lmWriteRecord()
379 if (tlck->flag & tlckPAGELOCK) { in lmWriteRecord()
380 p = (caddr_t) (tlck->mp->data); in lmWriteRecord()
381 linelock = (struct linelock *) & tlck->lock; in lmWriteRecord()
383 /* retrieve source in-memory inode to log */ in lmWriteRecord()
384 else if (tlck->flag & tlckINODELOCK) { in lmWriteRecord()
385 if (tlck->type & tlckDTREE) in lmWriteRecord()
386 p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot; in lmWriteRecord()
388 p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot; in lmWriteRecord()
389 linelock = (struct linelock *) & tlck->lock; in lmWriteRecord()
392 else if (tlck->flag & tlckINLINELOCK) { in lmWriteRecord()
395 p = (caddr_t) & inlinelock->pxd; in lmWriteRecord()
403 l2linesize = linelock->l2linesize; in lmWriteRecord()
406 ASSERT(linelock->index <= linelock->maxcnt); in lmWriteRecord()
408 lv = linelock->lv; in lmWriteRecord()
409 for (i = 0; i < linelock->index; i++, lv++) { in lmWriteRecord()
410 if (lv->length == 0) in lmWriteRecord()
414 if (dstoffset >= LOGPSIZE - LOGPTLRSIZE) { in lmWriteRecord()
418 bp = log->bp; in lmWriteRecord()
419 lp = (struct logpage *) bp->l_ldata; in lmWriteRecord()
426 src = (u8 *) p + (lv->offset << l2linesize); in lmWriteRecord()
427 srclen = lv->length << l2linesize; in lmWriteRecord()
430 freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset; in lmWriteRecord()
437 if (dstoffset < LOGPSIZE - LOGPTLRSIZE) in lmWriteRecord()
443 bp = (struct lbuf *) log->bp; in lmWriteRecord()
444 lp = (struct logpage *) bp->l_ldata; in lmWriteRecord()
447 srclen -= nbytes; in lmWriteRecord()
456 lvd->offset = cpu_to_le16(lv->offset); in lmWriteRecord()
457 lvd->length = cpu_to_le16(lv->length); in lmWriteRecord()
460 lv->offset, lv->length); in lmWriteRecord()
463 if ((i = linelock->next)) { in lmWriteRecord()
472 lrd->length = cpu_to_le16(len); in lmWriteRecord()
478 freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset; in lmWriteRecord()
484 srclen -= nbytes; in lmWriteRecord()
495 log->eor = dstoffset; in lmWriteRecord()
496 bp->l_eor = dstoffset; in lmWriteRecord()
497 lsn = (log->page << L2LOGPSIZE) + dstoffset; in lmWriteRecord()
499 if (lrd->type & cpu_to_le16(LOG_COMMIT)) { in lmWriteRecord()
500 tblk->clsn = lsn; in lmWriteRecord()
501 jfs_info("wr: tclsn:0x%x, beor:0x%x", tblk->clsn, in lmWriteRecord()
502 bp->l_eor); in lmWriteRecord()
509 * enqueue tblock of non-trivial/synchronous COMMIT in lmWriteRecord()
517 tblk->flag = tblkGC_QUEUE; in lmWriteRecord()
518 tblk->bp = log->bp; in lmWriteRecord()
519 tblk->pn = log->page; in lmWriteRecord()
520 tblk->eor = log->eor; in lmWriteRecord()
523 list_add_tail(&tblk->cqueue, &log->cqueue); in lmWriteRecord()
529 le16_to_cpu(lrd->type), log->bp, log->page, dstoffset); in lmWriteRecord()
532 if (dstoffset < LOGPSIZE - LOGPTLRSIZE) in lmWriteRecord()
539 bp = (struct lbuf *) log->bp; in lmWriteRecord()
540 lp = (struct logpage *) bp->l_ldata; in lmWriteRecord()
570 pn = log->page; in lmNextPage()
571 bp = log->bp; in lmNextPage()
572 lp = (struct logpage *) bp->l_ldata; in lmNextPage()
573 lspn = le32_to_cpu(lp->h.page); in lmNextPage()
581 if (list_empty(&log->cqueue)) in lmNextPage()
584 tblk = list_entry(log->cqueue.prev, struct tblock, cqueue); in lmNextPage()
596 if (tblk && tblk->pn == pn) { in lmNextPage()
597 /* mark tblk for end-of-page */ in lmNextPage()
598 tblk->flag |= tblkGC_EOP; in lmNextPage()
600 if (log->cflag & logGC_PAGEOUT) { in lmNextPage()
607 if (bp->l_wqnext == NULL) in lmNextPage()
613 log->cflag |= logGC_PAGEOUT; in lmNextPage()
622 bp->l_ceor = bp->l_eor; in lmNextPage()
623 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); in lmNextPage()
634 log->page = (pn == log->size - 1) ? 2 : pn + 1; in lmNextPage()
635 log->eor = LOGPHDRSIZE; /* ? valid page empty/full at logRedo() */ in lmNextPage()
638 nextbp = lbmAllocate(log, log->page); in lmNextPage()
639 nextbp->l_eor = log->eor; in lmNextPage()
640 log->bp = nextbp; in lmNextPage()
643 lp = (struct logpage *) nextbp->l_ldata; in lmNextPage()
644 lp->h.page = lp->t.page = cpu_to_le32(lspn + 1); in lmNextPage()
645 lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE); in lmNextPage()
656 * page number - redrive pageout of the page at the head of
673 if (tblk->flag & tblkGC_COMMITTED) { in lmGroupCommit()
674 if (tblk->flag & tblkGC_ERROR) in lmGroupCommit()
675 rc = -EIO; in lmGroupCommit()
680 jfs_info("lmGroup Commit: tblk = 0x%p, gcrtc = %d", tblk, log->gcrtc); in lmGroupCommit()
682 if (tblk->xflag & COMMIT_LAZY) in lmGroupCommit()
683 tblk->flag |= tblkGC_LAZY; in lmGroupCommit()
685 if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) && in lmGroupCommit()
686 (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag) in lmGroupCommit()
693 log->cflag |= logGC_PAGEOUT; in lmGroupCommit()
698 if (tblk->xflag & COMMIT_LAZY) { in lmGroupCommit()
708 if (tblk->flag & tblkGC_COMMITTED) { in lmGroupCommit()
709 if (tblk->flag & tblkGC_ERROR) in lmGroupCommit()
710 rc = -EIO; in lmGroupCommit()
718 log->gcrtc++; in lmGroupCommit()
719 tblk->flag |= tblkGC_READY; in lmGroupCommit()
721 __SLEEP_COND(tblk->gcwait, (tblk->flag & tblkGC_COMMITTED), in lmGroupCommit()
725 if (tblk->flag & tblkGC_ERROR) in lmGroupCommit()
726 rc = -EIO; in lmGroupCommit()
760 gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn; in lmGCwrite()
762 list_for_each_entry(tblk, &log->cqueue, cqueue) { in lmGCwrite()
763 if (tblk->pn != gcpn) in lmGCwrite()
768 /* state transition: (QUEUE, READY) -> COMMIT */ in lmGCwrite()
769 tblk->flag |= tblkGC_COMMIT; in lmGCwrite()
776 bp = (struct lbuf *) tblk->bp; in lmGCwrite()
777 lp = (struct logpage *) bp->l_ldata; in lmGCwrite()
779 if (tblk->flag & tblkGC_EOP) { in lmGCwrite()
781 tblk->flag &= ~tblkGC_EOP; in lmGCwrite()
782 tblk->flag |= tblkGC_FREE; in lmGCwrite()
783 bp->l_ceor = bp->l_eor; in lmGCwrite()
784 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); in lmGCwrite()
791 bp->l_ceor = tblk->eor; /* ? bp->l_ceor = bp->l_eor; */ in lmGCwrite()
792 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); in lmGCwrite()
801 * FUNCTION: group commit post-processing
813 struct jfs_log *log = bp->l_log; in lmPostGC()
818 spin_lock_irqsave(&log->gclock, flags); in lmPostGC()
825 list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) { in lmPostGC()
826 if (!(tblk->flag & tblkGC_COMMIT)) in lmPostGC()
830 * and made it to disk - it is committed. in lmPostGC()
833 if (bp->l_flag & lbmERROR) in lmPostGC()
834 tblk->flag |= tblkGC_ERROR; in lmPostGC()
837 list_del(&tblk->cqueue); in lmPostGC()
838 tblk->flag &= ~tblkGC_QUEUE; in lmPostGC()
840 if (tblk == log->flush_tblk) { in lmPostGC()
842 clear_bit(log_FLUSH, &log->flag); in lmPostGC()
843 log->flush_tblk = NULL; in lmPostGC()
847 tblk->flag); in lmPostGC()
849 if (!(tblk->xflag & COMMIT_FORCE)) in lmPostGC()
855 /* state transition: COMMIT -> COMMITTED */ in lmPostGC()
856 tblk->flag |= tblkGC_COMMITTED; in lmPostGC()
858 if (tblk->flag & tblkGC_READY) in lmPostGC()
859 log->gcrtc--; in lmPostGC()
867 if (tblk->flag & tblkGC_FREE) in lmPostGC()
872 else if (tblk->flag & tblkGC_EOP) { in lmPostGC()
874 lp = (struct logpage *) bp->l_ldata; in lmPostGC()
875 bp->l_ceor = bp->l_eor; in lmPostGC()
876 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); in lmPostGC()
888 * select the latest ready transaction as new group leader and in lmPostGC()
891 if ((!list_empty(&log->cqueue)) && in lmPostGC()
892 ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) || in lmPostGC()
893 test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low)) in lmPostGC()
905 log->cflag &= ~logGC_PAGEOUT; in lmPostGC()
908 spin_unlock_irqrestore(&log->gclock, flags); in lmPostGC()
917 * (normally the case if sync() is executed by background
922 * PARAMETERS: log - log structure
923 * hard_sync - 1 to force all metadata to be written
954 if (log->sync == log->syncpt) { in lmLogSync()
956 if (list_empty(&log->synclist)) in lmLogSync()
957 log->sync = log->lsn; in lmLogSync()
959 lp = list_entry(log->synclist.next, in lmLogSync()
961 log->sync = lp->lsn; in lmLogSync()
971 if (log->sync != log->syncpt) { in lmLogSync()
976 lrd.log.syncpt.sync = cpu_to_le32(log->sync); in lmLogSync()
979 log->syncpt = log->sync; in lmLogSync()
981 lsn = log->lsn; in lmLogSync()
986 logsize = log->logsize; in lmLogSync()
989 free = logsize - written; in lmLogSync()
997 * option 1 - panic ? No.! in lmLogSync()
998 * option 2 - shutdown file systems in lmLogSync()
1000 * option 3 - extend log ? in lmLogSync()
1001 * option 4 - second chance in lmLogSync()
1010 /* log->state = LOGWRAP; */ in lmLogSync()
1013 log->syncpt = log->sync = lsn; in lmLogSync()
1014 log->nextsync = delta; in lmLogSync()
1017 log->nextsync = written + more; in lmLogSync()
1024 if (!test_bit(log_SYNCBARRIER, &log->flag) && in lmLogSync()
1025 (written > LOGSYNC_BARRIER(logsize)) && log->active) { in lmLogSync()
1026 set_bit(log_SYNCBARRIER, &log->flag); in lmLogSync()
1028 log->syncpt); in lmLogSync()
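The space accounting in lmLogSync() above uses the same circular arithmetic: written is the distance from the last sync point to the current lsn, free is logsize minus that, and a sync barrier is set once written crosses LOGSYNC_BARRIER(logsize) (that macro's value is not part of this listing). A self-contained sketch of the check, with the barrier fraction as a stated assumption:

/* Sketch of the log-full test in lmLogSync(); the one-half threshold
 * below is an assumption standing in for LOGSYNC_BARRIER(logsize).
 */
static int log_needs_syncbarrier(int lsn, int syncpt, int logsize)
{
        int written = lsn - syncpt;	/* circular distance, as above */

        if (written < 0)
                written += logsize;
        return written > logsize / 2;	/* assumed barrier fraction */
}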
1043 * PARAMETERS: log - log structure
1044 * hard_sync - set to 1 to force metadata to be written
1048 if (!test_bit(log_QUIESCE, &log->flag)) in jfs_syncpt()
1059 * PARAMETER: ipmnt - file system mount inode
1060 * iplog - log inode (out)
1073 if (sbi->flag & JFS_NOINTEGRITY) in lmLogOpen()
1076 if (sbi->mntflag & JFS_INLINELOG) in lmLogOpen()
1081 if (log->bdev->bd_dev == sbi->logdev) { in lmLogOpen()
1082 if (!uuid_equal(&log->uuid, &sbi->loguuid)) { in lmLogOpen()
1085 return -EINVAL; in lmLogOpen()
1100 return -ENOMEM; in lmLogOpen()
1102 INIT_LIST_HEAD(&log->sb_list); in lmLogOpen()
1103 init_waitqueue_head(&log->syncwait); in lmLogOpen()
1108 * file systems to log may have an n-to-1 relationship; in lmLogOpen()
1111 bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, in lmLogOpen()
1118 log->bdev = bdev; in lmLogOpen()
1119 uuid_copy(&log->uuid, &sbi->loguuid); in lmLogOpen()
1127 list_add(&log->journal_list, &jfs_external_logs); in lmLogOpen()
1137 list_add(&sbi->log_list, &log->sb_list); in lmLogOpen()
1138 sbi->log = log; in lmLogOpen()
1148 list_del(&log->journal_list); in lmLogOpen()
1168 return -ENOMEM; in open_inline_log()
1169 INIT_LIST_HEAD(&log->sb_list); in open_inline_log()
1170 init_waitqueue_head(&log->syncwait); in open_inline_log()
1172 set_bit(log_INLINELOG, &log->flag); in open_inline_log()
1173 log->bdev = sb->s_bdev; in open_inline_log()
1174 log->base = addressPXD(&JFS_SBI(sb)->logpxd); in open_inline_log()
1175 log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >> in open_inline_log()
1176 (L2LOGPSIZE - sb->s_blocksize_bits); in open_inline_log()
1177 log->l2bsize = sb->s_blocksize_bits; in open_inline_log()
1178 ASSERT(L2LOGPSIZE >= sb->s_blocksize_bits); in open_inline_log()
1189 list_add(&JFS_SBI(sb)->log_list, &log->sb_list); in open_inline_log()
1190 JFS_SBI(sb)->log = log; in open_inline_log()
1204 return -ENOMEM; in open_dummy_log()
1206 INIT_LIST_HEAD(&dummy_log->sb_list); in open_dummy_log()
1207 init_waitqueue_head(&dummy_log->syncwait); in open_dummy_log()
1208 dummy_log->no_integrity = 1; in open_dummy_log()
1210 dummy_log->base = 0; in open_dummy_log()
1211 dummy_log->size = 1024; in open_dummy_log()
1222 list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list); in open_dummy_log()
1223 JFS_SBI(sb)->log = dummy_log; in open_dummy_log()
1240 * PARAMETER: log - log structure
1242 * RETURN: 0 - if ok
1243 * -EINVAL - bad log magic number or superblock dirty
1268 INIT_LIST_HEAD(&log->synclist); in lmLogInit()
1270 INIT_LIST_HEAD(&log->cqueue); in lmLogInit()
1271 log->flush_tblk = NULL; in lmLogInit()
1273 log->count = 0; in lmLogInit()
1281 if (!test_bit(log_INLINELOG, &log->flag)) in lmLogInit()
1282 log->l2bsize = L2LOGPSIZE; in lmLogInit()
1285 if (log->no_integrity) { in lmLogInit()
1292 log->bp = bp; in lmLogInit()
1293 bp->l_pn = bp->l_eor = 0; in lmLogInit()
1301 logsuper = (struct logsuper *) bpsuper->l_ldata; in lmLogInit()
1303 if (logsuper->magic != cpu_to_le32(LOGMAGIC)) { in lmLogInit()
1305 rc = -EINVAL; in lmLogInit()
1310 if (logsuper->state != cpu_to_le32(LOGREDONE)) { in lmLogInit()
1312 rc = -EINVAL; in lmLogInit()
1317 if (test_bit(log_INLINELOG,&log->flag)) { in lmLogInit()
1318 if (log->size != le32_to_cpu(logsuper->size)) { in lmLogInit()
1319 rc = -EINVAL; in lmLogInit()
1323 log, (unsigned long long)log->base, log->size); in lmLogInit()
1325 if (!uuid_equal(&logsuper->uuid, &log->uuid)) { in lmLogInit()
1327 rc = -EINVAL; in lmLogInit()
1330 log->size = le32_to_cpu(logsuper->size); in lmLogInit()
1331 log->l2bsize = le32_to_cpu(logsuper->l2bsize); in lmLogInit()
1333 log, (unsigned long long)log->base, log->size); in lmLogInit()
1336 log->page = le32_to_cpu(logsuper->end) / LOGPSIZE; in lmLogInit()
1337 log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page); in lmLogInit()
1342 /* establish current/end-of-log page/buffer */ in lmLogInit()
1343 if ((rc = lbmRead(log, log->page, &bp))) in lmLogInit()
1346 lp = (struct logpage *) bp->l_ldata; in lmLogInit()
1349 le32_to_cpu(logsuper->end), log->page, log->eor, in lmLogInit()
1350 le16_to_cpu(lp->h.eor)); in lmLogInit()
1352 log->bp = bp; in lmLogInit()
1353 bp->l_pn = log->page; in lmLogInit()
1354 bp->l_eor = log->eor; in lmLogInit()
1357 if (log->eor >= LOGPSIZE - LOGPTLRSIZE) in lmLogInit()
1376 bp = log->bp; in lmLogInit()
1377 bp->l_ceor = bp->l_eor; in lmLogInit()
1378 lp = (struct logpage *) bp->l_ldata; in lmLogInit()
1379 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); in lmLogInit()
1387 logsuper->state = cpu_to_le32(LOGMOUNT); in lmLogInit()
1388 log->serial = le32_to_cpu(logsuper->serial) + 1; in lmLogInit()
1389 logsuper->serial = cpu_to_le32(log->serial); in lmLogInit()
1396 log->logsize = (log->size - 2) << L2LOGPSIZE; in lmLogInit()
1397 log->lsn = lsn; in lmLogInit()
1398 log->syncpt = lsn; in lmLogInit()
1399 log->sync = log->syncpt; in lmLogInit()
1400 log->nextsync = LOGSYNC_DELTA(log->logsize); in lmLogInit()
1403 log->lsn, log->syncpt, log->sync); in lmLogInit()
1408 log->clsn = lsn; in lmLogInit()
1416 log->wqueue = NULL; in lmLogInit()
1417 bp->l_wqnext = NULL; in lmLogInit()
1437 * PARAMETER: sb - superblock
1446 struct jfs_log *log = sbi->log; in lmLogClose()
1454 list_del(&sbi->log_list); in lmLogClose()
1456 sbi->log = NULL; in lmLogClose()
1462 sync_blockdev(sb->s_bdev); in lmLogClose()
1464 if (test_bit(log_INLINELOG, &log->flag)) { in lmLogClose()
1466 * in-line log in host file system in lmLogClose()
1473 if (!log->no_integrity) in lmLogClose()
1476 if (!list_empty(&log->sb_list)) in lmLogClose()
1483 * buffers in memory, and reuse if another no-integrity mount in lmLogClose()
1486 if (log->no_integrity) in lmLogClose()
1492 list_del(&log->journal_list); in lmLogClose()
1493 bdev = log->bdev; in lmLogClose()
1522 /* jfs_write_inode may call us during read-only mount */ in jfs_flush_journal()
1530 if (!list_empty(&log->cqueue)) { in jfs_flush_journal()
1535 target = list_entry(log->cqueue.prev, struct tblock, cqueue); in jfs_flush_journal()
1537 if (test_bit(log_FLUSH, &log->flag)) { in jfs_flush_journal()
1544 if (log->flush_tblk) in jfs_flush_journal()
1545 log->flush_tblk = target; in jfs_flush_journal()
1548 log->flush_tblk = target; in jfs_flush_journal()
1549 set_bit(log_FLUSH, &log->flag); in jfs_flush_journal()
1554 if (!(log->cflag & logGC_PAGEOUT)) { in jfs_flush_journal()
1555 log->cflag |= logGC_PAGEOUT; in jfs_flush_journal()
1560 if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) { in jfs_flush_journal()
1562 set_bit(log_FLUSH, &log->flag); in jfs_flush_journal()
1563 log->flush_tblk = NULL; in jfs_flush_journal()
1566 if (wait && target && !(target->flag & tblkGC_COMMITTED)) { in jfs_flush_journal()
1569 add_wait_queue(&target->gcwait, &__wait); in jfs_flush_journal()
1574 remove_wait_queue(&target->gcwait, &__wait); in jfs_flush_journal()
1587 if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) { in jfs_flush_journal()
1591 if (list_empty(&log->cqueue) && in jfs_flush_journal()
1592 list_empty(&log->synclist)) in jfs_flush_journal()
1596 assert(list_empty(&log->cqueue)); in jfs_flush_journal()
1599 if (!list_empty(&log->synclist)) { in jfs_flush_journal()
1603 list_for_each_entry(lp, &log->synclist, synclist) { in jfs_flush_journal()
1604 if (lp->xflag & COMMIT_PAGE) { in jfs_flush_journal()
1611 sizeof(long), mp->page, in jfs_flush_journal()
1620 WARN_ON(!list_empty(&log->synclist)); in jfs_flush_journal()
1622 clear_bit(log_FLUSH, &log->flag); in jfs_flush_journal()
1633 * PARAMETER: log - log inode
1635 * RETURN: 0 - success
1664 bp = log->bp; in lmLogShutdown()
1665 lp = (struct logpage *) bp->l_ldata; in lmLogShutdown()
1666 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); in lmLogShutdown()
1667 lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0); in lmLogShutdown()
1668 lbmIOWait(log->bp, lbmFREE); in lmLogShutdown()
1669 log->bp = NULL; in lmLogShutdown()
1679 logsuper = (struct logsuper *) bpsuper->l_ldata; in lmLogShutdown()
1680 logsuper->state = cpu_to_le32(LOGREDONE); in lmLogShutdown()
1681 logsuper->end = cpu_to_le32(lsn); in lmLogShutdown()
1686 lsn, log->page, log->eor); in lmLogShutdown()
1707 * PARAMETER: log - pointer to log's inode.
1708 * fsdev - kdev_t of filesystem.
1709 * serial - pointer to returned log serial number
1710 * activate - insert/remove device from active list.
1712 * RETURN: 0 - success
1722 uuid_t *uuid = &sbi->uuid; in lmLogFileSystem()
1730 logsuper = (struct logsuper *) bpsuper->l_ldata; in lmLogFileSystem()
1733 if (uuid_is_null(&logsuper->active[i].uuid)) { in lmLogFileSystem()
1734 uuid_copy(&logsuper->active[i].uuid, uuid); in lmLogFileSystem()
1735 sbi->aggregate = i; in lmLogFileSystem()
1741 return -EMFILE; /* Is there a better rc? */ in lmLogFileSystem()
1745 if (uuid_equal(&logsuper->active[i].uuid, uuid)) { in lmLogFileSystem()
1746 uuid_copy(&logsuper->active[i].uuid, in lmLogFileSystem()
1753 return -EIO; in lmLogFileSystem()
1777 * ------------------------
1784 * a circular singly-linked list
1785 * (log->wqueue points to the tail, and buffers are linked via
1786 * the bp->l_wqnext field), and
1787 * maintains log pages in pageout or waiting for pageout, in serial pageout order.
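The pageout write queue described above is the tail-pointer circular list that lbmWrite() builds and lbmIODone() drains later in this listing: log->wqueue points at the tail buffer and tail->l_wqnext is the head. A minimal sketch of the enqueue step, mirroring the lbmWrite() fragment below (the struct and function names here are illustrative only):

/* Tail-pointer circular singly-linked list: *tailp is the most
 * recently queued buffer, (*tailp)->l_wqnext is the oldest.
 */
struct lbuf_sketch {
        struct lbuf_sketch *l_wqnext;
};

static void wqueue_add(struct lbuf_sketch **tailp, struct lbuf_sketch *bp)
{
        struct lbuf_sketch *tail = *tailp;

        if (tail == NULL) {
                bp->l_wqnext = bp;		/* single element points at itself */
        } else {
                bp->l_wqnext = tail->l_wqnext;	/* new tail points at the head */
                tail->l_wqnext = bp;		/* old tail links to the new buffer */
        }
        *tailp = bp;				/* new buffer becomes the tail */
}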
1803 log->bp = NULL; in lbmLogInit()
1806 log->wqueue = NULL; in lbmLogInit()
1817 init_waitqueue_head(&log->free_wait); in lbmLogInit()
1819 log->lbuf_free = NULL; in lbmLogInit()
1838 lbuf->l_offset = offset; in lbmLogInit()
1839 lbuf->l_ldata = buffer + offset; in lbmLogInit()
1840 lbuf->l_page = page; in lbmLogInit()
1841 lbuf->l_log = log; in lbmLogInit()
1842 init_waitqueue_head(&lbuf->l_ioevent); in lbmLogInit()
1844 lbuf->l_freelist = log->lbuf_free; in lbmLogInit()
1845 log->lbuf_free = lbuf; in lbmLogInit()
1854 return -ENOMEM; in lbmLogInit()
1869 lbuf = log->lbuf_free; in lbmLogShutdown()
1871 struct lbuf *next = lbuf->l_freelist; in lbmLogShutdown()
1872 __free_page(lbuf->l_page); in lbmLogShutdown()
1893 LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags); in lbmAllocate()
1894 log->lbuf_free = bp->l_freelist; in lbmAllocate()
1897 bp->l_flag = 0; in lbmAllocate()
1899 bp->l_wqnext = NULL; in lbmAllocate()
1900 bp->l_freelist = NULL; in lbmAllocate()
1902 bp->l_pn = pn; in lbmAllocate()
1903 bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize)); in lbmAllocate()
1904 bp->l_ceor = 0; in lbmAllocate()
1928 struct jfs_log *log = bp->l_log; in lbmfree()
1930 assert(bp->l_wqnext == NULL); in lbmfree()
1935 bp->l_freelist = log->lbuf_free; in lbmfree()
1936 log->lbuf_free = bp; in lbmfree()
1938 wake_up(&log->free_wait); in lbmfree()
1949 * bp - log buffer
1959 bp->l_redrive_next = log_redrive_list; in lbmRedrive()
1981 bp->l_flag |= lbmREAD; in lbmRead()
1985 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmRead()
1986 bio_set_dev(bio, log->bdev); in lbmRead()
1988 bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); in lbmRead()
1989 BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); in lbmRead()
1991 bio->bi_end_io = lbmIODone; in lbmRead()
1992 bio->bi_private = bp; in lbmRead()
1993 bio->bi_opf = REQ_OP_READ; in lbmRead()
1995 if (log->no_integrity) { in lbmRead()
1996 bio->bi_iter.bi_size = 0; in lbmRead()
2002 wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD)); in lbmRead()
2012 * partial-page pageout and redriven by explicit initiation of
2013 * pageout by caller until full-page pageout is completed and
2018 * queue is released at the completion of its full-page pageout.
2029 jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn); in lbmWrite()
2032 bp->l_blkno = in lbmWrite()
2033 log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); in lbmWrite()
2040 bp->l_flag = flag; in lbmWrite()
2048 tail = log->wqueue; in lbmWrite()
2051 if (bp->l_wqnext == NULL) { in lbmWrite()
2054 log->wqueue = bp; in lbmWrite()
2055 bp->l_wqnext = bp; in lbmWrite()
2057 log->wqueue = bp; in lbmWrite()
2058 bp->l_wqnext = tail->l_wqnext; in lbmWrite()
2059 tail->l_wqnext = bp; in lbmWrite()
2066 if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) { in lbmWrite()
2094 bp, flag, bp->l_pn); in lbmDirectWrite()
2099 bp->l_flag = flag | lbmDIRECT; in lbmDirectWrite()
2102 bp->l_blkno = in lbmDirectWrite()
2103 log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); in lbmDirectWrite()
2124 struct jfs_log *log = bp->l_log; in lbmStartIO()
2129 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmStartIO()
2130 bio_set_dev(bio, log->bdev); in lbmStartIO()
2132 bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); in lbmStartIO()
2133 BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); in lbmStartIO()
2135 bio->bi_end_io = lbmIODone; in lbmStartIO()
2136 bio->bi_private = bp; in lbmStartIO()
2137 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; in lbmStartIO()
2140 if (log->no_integrity) { in lbmStartIO()
2141 bio->bi_iter.bi_size = 0; in lbmStartIO()
2158 jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); in lbmIOWait()
2162 LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags); in lbmIOWait()
2164 rc = (bp->l_flag & lbmERROR) ? -EIO : 0; in lbmIOWait()
2171 jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); in lbmIOWait()
2178 * executed at INTIODONE level
2182 struct lbuf *bp = bio->bi_private; in lbmIODone()
2190 jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag); in lbmIODone()
2194 bp->l_flag |= lbmDONE; in lbmIODone()
2196 if (bio->bi_status) { in lbmIODone()
2197 bp->l_flag |= lbmERROR; in lbmIODone()
2207 if (bp->l_flag & lbmREAD) { in lbmIODone()
2208 bp->l_flag &= ~lbmREAD; in lbmIODone()
2213 LCACHE_WAKEUP(&bp->l_ioevent); in lbmIODone()
2223 * if single-commit/full-page pageout, remove the current buffer in lbmIODone()
2226 * otherwise, the partial-page pageout buffer stays at in lbmIODone()
2228 * by lmGroupCommit() until full-page pageout is completed. in lbmIODone()
2230 bp->l_flag &= ~lbmWRITE; in lbmIODone()
2234 log = bp->l_log; in lbmIODone()
2235 log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor; in lbmIODone()
2237 if (bp->l_flag & lbmDIRECT) { in lbmIODone()
2238 LCACHE_WAKEUP(&bp->l_ioevent); in lbmIODone()
2243 tail = log->wqueue; in lbmIODone()
2247 /* remove head buffer of full-page pageout in lbmIODone()
2250 if (bp->l_flag & lbmRELEASE) { in lbmIODone()
2251 log->wqueue = NULL; in lbmIODone()
2252 bp->l_wqnext = NULL; in lbmIODone()
2257 /* remove head buffer of full-page pageout in lbmIODone()
2260 if (bp->l_flag & lbmRELEASE) { in lbmIODone()
2261 nextbp = tail->l_wqnext = bp->l_wqnext; in lbmIODone()
2262 bp->l_wqnext = NULL; in lbmIODone()
2274 if (nextbp->l_flag & lbmWRITE) { in lbmIODone()
2288 * (e.g., synchronous write of partial-page with COMMIT): in lbmIODone()
2291 if (bp->l_flag & lbmSYNC) { in lbmIODone()
2295 LCACHE_WAKEUP(&bp->l_ioevent); in lbmIODone()
2301 else if (bp->l_flag & lbmGC) { in lbmIODone()
2313 assert(bp->l_flag & lbmRELEASE); in lbmIODone()
2314 assert(bp->l_flag & lbmFREE); in lbmIODone()
2328 log_redrive_list = bp->l_redrive_next; in jfsIOWait()
2329 bp->l_redrive_next = NULL; in jfsIOWait()
2355 * log - volume log
2356 * logAddress - start address of log space in FS block
2357 * logSize - length of log space in FS block;
2359 * RETURN: 0 - success
2360 * -EIO - i/o error
2367 int rc = -EIO; in lmLogFormat()
2379 sbi = list_entry(log->sb_list.next, struct jfs_sb_info, log_list); in lmLogFormat()
2384 npages = logSize >> sbi->l2nbperpage; in lmLogFormat()
2389 * page 0 - reserved; in lmLogFormat()
2390 * page 1 - log superblock; in lmLogFormat()
2391 * page 2 - log data page: A SYNC log record is written in lmLogFormat()
2393 * pages 3-N - log data page: set to empty log data pages; in lmLogFormat()
2398 logsuper = (struct logsuper *) bp->l_ldata; in lmLogFormat()
2400 logsuper->magic = cpu_to_le32(LOGMAGIC); in lmLogFormat()
2401 logsuper->version = cpu_to_le32(LOGVERSION); in lmLogFormat()
2402 logsuper->state = cpu_to_le32(LOGREDONE); in lmLogFormat()
2403 logsuper->flag = cpu_to_le32(sbi->mntflag); /* ? */ in lmLogFormat()
2404 logsuper->size = cpu_to_le32(npages); in lmLogFormat()
2405 logsuper->bsize = cpu_to_le32(sbi->bsize); in lmLogFormat()
2406 logsuper->l2bsize = cpu_to_le32(sbi->l2bsize); in lmLogFormat()
2407 logsuper->end = cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE); in lmLogFormat()
2409 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; in lmLogFormat()
2410 bp->l_blkno = logAddress + sbi->nbperpage; in lmLogFormat()
2416 * init pages 2 to npages-1 as log data pages: in lmLogFormat()
2420 *   pn:     0     1     2     3               n-1      in lmLogFormat()
2421 *        +-----+-----+=====+=====+===.....===+=====+   in lmLogFormat()
2422 *   lspn:              N-1    0     1          N-2     in lmLogFormat()
2423 *                      <--- N page circular file ----> in lmLogFormat()
2425 * the N (= npages-2) data pages of the log are maintained as in lmLogFormat()
2435 * the lspn starting from 0, ... (N-2) in lmLogFormat()
2437 lp = (struct logpage *) bp->l_ldata; in lmLogFormat()
2439 * initialize 1st log page to be written: lspn = N - 1, in lmLogFormat()
2442 lp->h.page = lp->t.page = cpu_to_le32(npages - 3); in lmLogFormat()
2443 lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE); in lmLogFormat()
2445 lrd_ptr = (struct lrd *) &lp->data; in lmLogFormat()
2446 lrd_ptr->logtid = 0; in lmLogFormat()
2447 lrd_ptr->backchain = 0; in lmLogFormat()
2448 lrd_ptr->type = cpu_to_le16(LOG_SYNCPT); in lmLogFormat()
2449 lrd_ptr->length = 0; in lmLogFormat()
2450 lrd_ptr->log.syncpt.sync = 0; in lmLogFormat()
2452 bp->l_blkno += sbi->nbperpage; in lmLogFormat()
2453 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; in lmLogFormat()
2459 * initialize succeeding log pages: lspn = 0, 1, ..., (N-2) in lmLogFormat()
2461 for (lspn = 0; lspn < npages - 3; lspn++) { in lmLogFormat()
2462 lp->h.page = lp->t.page = cpu_to_le32(lspn); in lmLogFormat()
2463 lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE); in lmLogFormat()
2465 bp->l_blkno += sbi->nbperpage; in lmLogFormat()
2466 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; in lmLogFormat()
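The layout that lmLogFormat() writes above maps physical log page numbers (pn) onto log sequence page numbers (lspn): pn 0 is reserved, pn 1 holds the log superblock, pn 2 gets the first record and lspn N-1, and pn 3 through npages-1 carry lspn 0 through N-2, where N = npages - 2 is the number of data pages. A small illustrative helper capturing that mapping (not part of the source):

/* Map a physical log page number to the lspn lmLogFormat() stamps
 * into it; returns -1 outside the data pages.
 */
static int lspn_of_pn(int pn, int npages)
{
        int N = npages - 2;		/* data pages: pn 2 .. npages-1 */

        if (pn < 2 || pn >= npages)
                return -1;		/* pn 0: reserved, pn 1: log superblock */
        if (pn == 2)
                return N - 1;		/* first formatted page, carries the SYNCPT record */
        return pn - 3;			/* pn 3..npages-1  ->  lspn 0..N-2 */
}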