/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/fs/jfs/

Lines Matching refs:tblk

164 static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
166 static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
168 static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
170 static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
173 struct tblock * tblk);
174 static void txForce(struct tblock * tblk);
175 static int txLog(struct jfs_log * log, struct tblock * tblk,
177 static void txUpdateMap(struct tblock * tblk);
178 static void txRelease(struct tblock * tblk);
179 static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
364 struct tblock *tblk;
409 tblk = tid_to_tblock(t);
411 if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
412 /* Don't let a non-forced transaction take the last tblk */
419 TxAnchor.freetid = tblk->next;
427 * awakened after sleeping on tblk->waitor
429 * memset(tblk, 0, sizeof(struct tblock));
431 tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;
433 tblk->sb = sb;
435 tblk->logtid = log->logtid;
502 struct tblock *tblk = tid_to_tblock(tid);
512 TXN_WAKEUP(&tblk->waitor);
514 log = JFS_SBI(tblk->sb)->log;
524 if (tblk->flag & tblkGC_LAZY) {
525 jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
529 tblk->flag |= tblkGC_UNLOCKED;
534 jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);
536 assert(tblk->next == 0);
541 tblk->next = TxAnchor.freetid;
601 struct tblock *tblk;
643 tblk = tid_to_tblock(tid);
678 if (tblk->next)
679 lid_to_tlock(tblk->last)->next = lid;
681 tblk->next = lid;
683 tblk->last = lid;
745 tblk = tid_to_tblock(tid);
746 if (tblk->next)
747 lid_to_tlock(tblk->last)->next = lid;
749 tblk->next = lid;
751 tblk->last = lid;
835 dump_mem("Locker's tblk", tid_to_tblock(tid),
868 * tblk -
872 static void txRelease(struct tblock * tblk)
880 for (lid = tblk->next; lid; lid = tlck->next) {
893 TXN_WAKEUP(&tblk->waitor);
904 static void txUnlock(struct tblock * tblk)
914 jfs_info("txUnlock: tblk = 0x%p", tblk);
915 log = JFS_SBI(tblk->sb)->log;
920 for (lid = tblk->next; lid; lid = next) {
941 logdiff(difft, tblk->clsn, log);
944 mp->clsn = tblk->clsn;
946 mp->clsn = tblk->clsn;
970 tblk->next = tblk->last = 0;
974 * (allocation map pages inherited lsn of tblk and
977 if (tblk->lsn) {
980 list_del(&tblk->synclist);
995 struct tblock *tblk;
1026 tblk = tid_to_tblock(tid);
1027 if (tblk->next)
1028 lid_to_tlock(tblk->last)->next = lid;
1030 tblk->next = lid;
1032 tblk->last = lid;
1139 struct tblock *tblk;
1160 tblk = tid_to_tblock(tid);
1170 lrd->logtid = cpu_to_le32(tblk->logtid);
1173 tblk->xflag |= flag;
1176 tblk->xflag |= COMMIT_LAZY;
1233 * && (tblk->flag & COMMIT_DELETE) == 0)
1246 lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
1247 tblk->next = jfs_ip->atlhead;
1248 if (!tblk->last)
1249 tblk->last = jfs_ip->atltail;
1258 * (become first tlock of the tblk's tlock list)
1269 if ((rc = txLog(log, tblk, &cd)))
1276 if (tblk->xflag & COMMIT_DELETE) {
1277 atomic_inc(&tblk->u.ip->i_count);
1288 if (tblk->u.ip->i_state & I_LOCK)
1289 tblk->xflag &= ~COMMIT_LAZY;
1292 ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
1293 ((tblk->u.ip->i_nlink == 0) &&
1294 !test_cflag(COMMIT_Nolink, tblk->u.ip)));
1301 lsn = lmLog(log, tblk, lrd, NULL);
1303 lmGroupCommit(log, tblk);
1314 txForce(tblk);
1325 if (tblk->xflag & COMMIT_FORCE)
1326 txUpdateMap(tblk);
1331 txRelease(tblk);
1333 if ((tblk->flag & tblkGC_LAZY) == 0)
1334 txUnlock(tblk);
1371 static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
1382 for (lid = tblk->next; lid; lid = tlck->next) {
1396 xtLog(log, tblk, lrd, tlck);
1400 dtLog(log, tblk, lrd, tlck);
1404 diLog(log, tblk, lrd, tlck, cd);
1408 mapLog(log, tblk, lrd, tlck);
1412 dataLog(log, tblk, lrd, tlck);
1428 static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1452 mp->logical_size >> tblk->sb->s_blocksize_bits);
1453 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1490 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1525 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1541 static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1572 PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
1574 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1587 static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1623 mp->logical_size >> tblk->sb->s_blocksize_bits);
1624 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1652 mp->logical_size >> tblk->sb->s_blocksize_bits);
1653 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1675 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1690 static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1742 mp->logical_size >> tblk->sb->s_blocksize_bits);
1743 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1763 if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1789 tblk->xflag &= ~COMMIT_LAZY;
1826 if (tblk->xflag & COMMIT_TRUNCATE) {
1831 mp->logical_size >> tblk->sb->
1834 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1840 cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1858 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1866 if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1892 tblk->xflag &= ~COMMIT_LAZY;
1900 if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
1904 else (tblk->xflag & COMMIT_PMAP)
1932 tblk->xflag &= ~COMMIT_LAZY;
1953 mp->logical_size >> tblk->sb->s_blocksize_bits);
1954 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1973 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1996 cpu_to_le32(lmLog(log, tblk, lrd, tlck));
2076 static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
2098 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2114 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2146 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2215 static void txForce(struct tblock * tblk)
2226 tlck = lid_to_tlock(tblk->next);
2232 tlck->next = tblk->next;
2233 tblk->next = lid;
2241 for (lid = tblk->next; lid; lid = next) {
2267 static void txUpdateMap(struct tblock * tblk)
2279 ipimap = JFS_SBI(tblk->sb)->ipimap;
2281 maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
2296 for (lid = tblk->next; lid; lid = tlck->next) {
2330 txAllocPMap(ipimap, maplock, tblk);
2350 tblk, COMMIT_PWMAP);
2353 tblk, maptype);
2357 if (!(tblk->flag & tblkGC_LAZY)) {
2377 if (tblk->xflag & COMMIT_CREATE) {
2378 diUpdatePMap(ipimap, tblk->ino, false, tblk);
2383 pxdlock.pxd = tblk->u.ixpxd;
2385 txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
2386 } else if (tblk->xflag & COMMIT_DELETE) {
2387 ip = tblk->u.ip;
2388 diUpdatePMap(ipimap, ip->i_ino, true, tblk);
2414 struct tblock * tblk)
2437 (s64) xlen, tblk);
2447 dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen, tblk);
2457 tblk);
2472 struct maplock * maplock, struct tblock * tblk, int maptype)
2484 jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
2485 tblk, maplock, maptype);
2499 (s64) xlen, tblk);
2510 tblk);
2521 (s64) xlen, tblk);
2620 struct tblock *tblk = tid_to_tblock(tid);
2626 for (lid = tblk->next; lid; lid = next) {
2654 tblk->next = tblk->last = 0;
2660 jfs_error(tblk->sb, "txAbort");
2673 static void txLazyCommit(struct tblock * tblk)
2677 while (((tblk->flag & tblkGC_READY) == 0) &&
2678 ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
2681 jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
2685 jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
2687 txUpdateMap(tblk);
2689 log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
2693 tblk->flag |= tblkGC_COMMITTED;
2695 if (tblk->flag & tblkGC_READY)
2698 wake_up_all(&tblk->gcwait); // LOGGC_WAKEUP
2701 * Can't release log->gclock until we've tested tblk->flag
2703 if (tblk->flag & tblkGC_LAZY) {
2705 txUnlock(tblk);
2706 tblk->flag &= ~tblkGC_LAZY;
2707 txEnd(tblk - TxBlock); /* Convert back to tid */
2711 jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
2724 struct tblock *tblk;
2733 list_for_each_entry(tblk, &TxAnchor.unlock_queue,
2736 sbi = JFS_SBI(tblk->sb);
2740 * is handling a tblk for this superblock,
2752 list_del(&tblk->cqueue);
2755 txLazyCommit(tblk);
2796 void txLazyUnlock(struct tblock * tblk)
2802 list_add_tail(&tblk->cqueue, &TxAnchor.unlock_queue);
2807 if (!(JFS_SBI(tblk->sb)->commit_state & IN_LAZYCOMMIT) &&
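The matches above trace the life cycle of a tblock: txBegin pops a free tblk off TxAnchor.freetid (lines 409-435), the same next field is then reused to chain the transaction's tlocks (lines 678-751), and txEnd pushes the tblk back onto the free chain once that list is empty (lines 536-541). Below is a minimal user-space sketch of just that free-list pattern, under stated assumptions: NR_TBLOCKS, the single-field struct tblock, and the assert() stand-ins are simplifications, and the locking, sleeping on tblk->waitor, and COMMIT_FORCE handling in the real code are all omitted. It is an illustration, not the kernel code.

	#include <assert.h>
	#include <stdio.h>

	#define NR_TBLOCKS 8	/* simplified; the kernel sizes TxBlock at mount/init time */

	struct tblock {
		unsigned short next;	/* next free tid, or head of the tlock list when in use */
	};

	static struct tblock TxBlock[NR_TBLOCKS];
	static struct {
		unsigned short freetid;	/* head of the free-tid chain */
	} TxAnchor;

	static struct tblock *tid_to_tblock(unsigned short tid)
	{
		return &TxBlock[tid];	/* a tid is just an index into TxBlock */
	}

	static void txInit(void)
	{
		unsigned short t;

		/* tid 0 is reserved as the "no transaction" value */
		for (t = 1; t < NR_TBLOCKS - 1; t++)
			TxBlock[t].next = t + 1;
		TxBlock[NR_TBLOCKS - 1].next = 0;
		TxAnchor.freetid = 1;
	}

	static unsigned short txBegin(void)
	{
		unsigned short t = TxAnchor.freetid;
		struct tblock *tblk = tid_to_tblock(t);

		assert(t != 0);			/* the real code sleeps until a tid frees up */
		TxAnchor.freetid = tblk->next;	/* pop this tblk off the free chain */
		tblk->next = 0;			/* field now heads this transaction's tlock list */
		return t;
	}

	static void txEnd(unsigned short tid)
	{
		struct tblock *tblk = tid_to_tblock(tid);

		assert(tblk->next == 0);	/* all tlocks must already be released */
		tblk->next = TxAnchor.freetid;	/* push the tblk back onto the free chain */
		TxAnchor.freetid = tid;
	}

	int main(void)
	{
		txInit();
		unsigned short tid = txBegin();
		printf("allocated tid %d, tblk at %p\n", tid, (void *)tid_to_tblock(tid));
		txEnd(tid);
		return 0;
	}

The same index relationship is what lets txLazyCommit convert a tblock pointer back into a tid with plain array arithmetic, txEnd(tblk - TxBlock), at line 2707 of the listing.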