Results restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/fs/xfs/

Lines Matching refs:btree
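
All of the matches below index the btree[] array inside an XFS directory/attribute ("da") intermediate node. As a reading aid, here is a minimal user-space model of that layout together with stand-ins for the endian helpers the matches use; the field names are taken from the matched lines, while the header details and the plain-integer __be typedefs are assumptions of this sketch. Later sketches in this listing reuse these definitions.

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>      /* htonl()/ntohl() as stand-ins for the kernel helpers */

    /* Stand-ins for the kernel's endian conversions (the on-disk format is big-endian). */
    #define cpu_to_be32(x) htonl(x)
    #define be32_to_cpu(x) ntohl(x)
    #define cpu_to_be16(x) htons(x)
    #define be16_to_cpu(x) ntohs(x)

    typedef uint16_t __be16;
    typedef uint32_t __be32;

    /* One routing entry in an intermediate ("da") node. */
    typedef struct xfs_da_node_entry {
        __be32 hashval;     /* largest name hash reachable through this entry */
        __be32 before;      /* block number of the child holding those hashes */
    } xfs_da_node_entry_t;

    /* An intermediate node: a small header plus a hashval-sorted btree[] array.
     * The real header also carries sibling links and a magic number. */
    typedef struct xfs_da_intnode {
        struct {
            __be16 count;   /* live entries in btree[] */
            __be16 level;   /* height above the leaf blocks */
        } hdr;
        xfs_da_node_entry_t btree[1];   /* sized to fill the rest of the block */
    } xfs_da_intnode_t;
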

219 		 * Update the btree to show the new hashval for this child.
325 size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
348 node->btree[0].hashval = cpu_to_be32(blk1->hashval);
349 node->btree[0].before = cpu_to_be32(blk1->blkno);
350 node->btree[1].hashval = cpu_to_be32(blk2->hashval);
351 node->btree[1].before = cpu_to_be32(blk2->blkno);
365 XFS_DA_LOGRANGE(node, node->btree,
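
Lines 348-351 and 365 initialize the two entries of a freshly split root and log the modified byte range. A minimal sketch of that pattern, reusing the model types above; root_point_at_halves() and log_range() are names invented here, and XFS_DA_LOGRANGE is reduced to explicit first/last byte offsets passed to the (elided) buffer-logging call.

    /* Stub for the kernel's buffer-logging call: record that bytes
     * [first, last] of this block changed in the current transaction. */
    static void log_range(void *block, long first, long last)
    {
        (void)block; (void)first; (void)last;
    }

    /* The new root gets exactly two entries, one per split half, and only the
     * bytes covering those entries are logged (the XFS_DA_LOGRANGE idea). */
    static void root_point_at_halves(xfs_da_intnode_t *node,
                                     uint32_t hash1, uint32_t blkno1,
                                     uint32_t hash2, uint32_t blkno2)
    {
        node->btree[0].hashval = cpu_to_be32(hash1);
        node->btree[0].before  = cpu_to_be32(blkno1);
        node->btree[1].hashval = cpu_to_be32(hash2);
        node->btree[1].before  = cpu_to_be32(blkno2);
        node->hdr.count = cpu_to_be16(2);

        log_range(node, (char *)node->btree - (char *)node,
                  (char *)node->btree - (char *)node
                        + 2 * sizeof(node->btree[0]) - 1);
    }
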
458 * Balance the btree elements between two intermediate nodes,
479 ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
480 (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
481 be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
501 btree_s = &node2->btree[0];
502 btree_d = &node2->btree[count];
512 btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
513 btree_d = &node2->btree[0];
523 btree_s = &node2->btree[0];
524 btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
535 btree_s = &node2->btree[count];
536 btree_d = &node2->btree[0];
549 sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
557 blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
558 blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
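
Lines 501-558 move a run of entries between two sibling nodes and then recompute each block's largest hash from its last entry. One direction of that shift, sketched against the model above; shift_tail_right() is an invented name, and the real code's choice of direction and entry count is elided.

    /* Move `count` entries from the end of node1 to the front of node2, then
     * recompute each node's largest hash the way lines 557-558 do. */
    static void shift_tail_right(xfs_da_intnode_t *node1, xfs_da_intnode_t *node2,
                                 int count, uint32_t *hash1, uint32_t *hash2)
    {
        int c1 = be16_to_cpu(node1->hdr.count);
        int c2 = be16_to_cpu(node2->hdr.count);

        /* make room at the front of node2, then copy node1's tail into it */
        memmove(&node2->btree[count], &node2->btree[0],
                c2 * sizeof(node2->btree[0]));
        memcpy(&node2->btree[0], &node1->btree[c1 - count],
               count * sizeof(node1->btree[0]));

        node1->hdr.count = cpu_to_be16(c1 - count);
        node2->hdr.count = cpu_to_be16(c2 + count);

        /* a block's hash in its parent is always the hash of its last entry */
        *hash1 = be32_to_cpu(node1->btree[c1 - count - 1].hashval);
        *hash2 = be32_to_cpu(node2->btree[c2 + count - 1].hashval);
    }
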
577 xfs_da_node_entry_t *btree;
592 btree = &node->btree[ oldblk->index ];
594 tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
595 memmove(btree + 1, btree, tmp);
597 btree->hashval = cpu_to_be32(newblk->hashval);
598 btree->before = cpu_to_be32(newblk->blkno);
600 XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
608 oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
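
Lines 592-608 insert a (hashval, before) pair at a given index by opening a hole with memmove and logging from the hole through the end of the shifted tail. A sketch under the same assumptions as above; node_insert() is an invented name.

    /* Open a hole at `index`, drop the new (hash, block) pair into it, and log
     * from the hole to the end of the shifted tail, as at lines 594-600. */
    static void node_insert(xfs_da_intnode_t *node, int index,
                            uint32_t hashval, uint32_t blkno)
    {
        int count = be16_to_cpu(node->hdr.count);
        xfs_da_node_entry_t *btree = &node->btree[index];
        size_t tmp = (size_t)(count - index) * sizeof(*btree);

        memmove(btree + 1, btree, tmp);          /* shift the tail up by one slot */
        btree->hashval = cpu_to_be32(hashval);
        btree->before  = cpu_to_be32(blkno);

        log_range(node, (char *)btree - (char *)node,
                  (char *)btree - (char *)node + tmp + sizeof(*btree) - 1);
        node->hdr.count = cpu_to_be16(count + 1);
    }
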
731 child = be32_to_cpu(oldroot->btree[0].before);
889 xfs_da_node_entry_t *btree;
915 btree = &node->btree[ blk->index ];
916 if (be32_to_cpu(btree->hashval) == lasthash)
919 btree->hashval = cpu_to_be32(lasthash);
921 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
923 lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
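
Lines 915-923 walk a saved lookup path upward after a child's largest hash has changed, rewriting one parent entry per level and stopping early once a level already matches. A sketch; fix_hash_path(), path[] and index[] are hypothetical stand-ins for the kernel's per-level path state.

    /* Walk the saved path upward, rewriting the entry that points at the child
     * whose largest hash just changed; stop once a level is already correct. */
    static void fix_hash_path(xfs_da_intnode_t **path, int *index, int level,
                              uint32_t lasthash)
    {
        for (level--; level >= 0; level--) {
            xfs_da_intnode_t *node = path[level];
            xfs_da_node_entry_t *btree = &node->btree[index[level]];

            if (be32_to_cpu(btree->hashval) == lasthash)
                break;          /* nothing above can change either */
            btree->hashval = cpu_to_be32(lasthash);
            log_range(node, (char *)btree - (char *)node,
                      (char *)btree - (char *)node + sizeof(*btree) - 1);

            /* push this node's own largest hash further up the path */
            lasthash = be32_to_cpu(
                    node->btree[be16_to_cpu(node->hdr.count) - 1].hashval);
        }
    }
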
934 xfs_da_node_entry_t *btree;
944 btree = &node->btree[drop_blk->index];
948 memmove(btree, btree + 1, tmp);
950 XFS_DA_LOGRANGE(node, btree, tmp));
951 btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
953 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
955 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
963 btree--;
964 drop_blk->hashval = be32_to_cpu(btree->hashval);
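
Lines 944-964 delete an entry by sliding the tail down over it, scrubbing the now-duplicated last slot, and handing back the node's new largest hash for the path fix-up above. Sketched under the same assumptions; node_remove() is an invented name.

    /* Slide the tail down over the removed slot, scrub the now-duplicated last
     * slot, and report the node's new largest hash for the path fix-up. */
    static uint32_t node_remove(xfs_da_intnode_t *node, int index)
    {
        int count = be16_to_cpu(node->hdr.count);
        xfs_da_node_entry_t *btree = &node->btree[index];
        size_t tmp;

        if (index < count - 1) {
            tmp = (size_t)(count - index - 1) * sizeof(*btree);
            memmove(btree, btree + 1, tmp);
            log_range(node, (char *)btree - (char *)node,
                      (char *)btree - (char *)node + tmp - 1);
            btree = &node->btree[count - 1];
        }
        memset(btree, 0, sizeof(*btree));
        log_range(node, (char *)btree - (char *)node,
                  (char *)btree - (char *)node + sizeof(*btree) - 1);
        node->hdr.count = cpu_to_be16(count - 1);

        return be32_to_cpu(node->btree[count - 2].hashval);
    }
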
968 * Unbalance the btree elements between two intermediate nodes,
976 xfs_da_node_entry_t *btree;
990 if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
991 (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
992 be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
994 btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
996 memmove(btree, &save_node->btree[0], tmp);
997 btree = &save_node->btree[0];
999 XFS_DA_LOGRANGE(save_node, btree,
1003 btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
1005 XFS_DA_LOGRANGE(save_node, btree,
1014 memcpy(btree, &drop_node->btree[0], tmp);
1024 save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
1048 xfs_da_node_entry_t *btree;
1087 blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
1094 for (btree = &node->btree[probe]; span > 4;
1095 btree = &node->btree[probe]) {
1097 btreehashval = be32_to_cpu(btree->hashval);
1106 ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));
1112 while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
1113 btree--;
1116 while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
1117 btree++;
1126 blkno = be32_to_cpu(node->btree[max-1].before);
1129 blkno = be32_to_cpu(btree->before);
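
Lines 1087-1129 pick the child block to descend into: a binary search over the sorted hashvals narrows the window, then a short linear scan lands on the first entry whose hash is not below the target. A sketch of that selection, reusing the model above; node_lookup_child() is an invented name.

    /* Binary-search the sorted hashvals until the span is small, then scan
     * linearly so duplicate hashes are entered at their first occurrence. */
    static uint32_t node_lookup_child(xfs_da_intnode_t *node, uint32_t hashval)
    {
        int max = be16_to_cpu(node->hdr.count);
        int probe = max / 2, span = max / 2;
        xfs_da_node_entry_t *btree = &node->btree[probe];

        while (span > 4) {
            uint32_t h;

            span /= 2;
            h = be32_to_cpu(btree->hashval);
            if (h < hashval)
                probe += span;
            else if (h > hashval)
                probe -= span;
            else
                break;
            btree = &node->btree[probe];
        }

        /* back up past entries at or above the target, then forward past smaller ones */
        while (probe > 0 && be32_to_cpu(btree->hashval) >= hashval) {
            btree--;
            probe--;
        }
        while (probe < max && be32_to_cpu(btree->hashval) < hashval) {
            btree++;
            probe++;
        }

        /* hash is larger than anything here: follow the last child */
        if (probe == max)
            return be32_to_cpu(node->btree[max - 1].before);
        return be32_to_cpu(btree->before);
    }

The cutover from binary search to linear scan (span no larger than 4, per the assertion at line 1106) keeps the loop simple for small nodes, and the backward/forward scan matters because several entries can share a hash value and the descent must start at the first of them.
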
1284 ((be32_to_cpu(node2->btree[0].hashval) <
1285 be32_to_cpu(node1->btree[0].hashval)) ||
1286 (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
1287 be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
1307 return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1418 blkno = be32_to_cpu(node->btree[blk->index].before);
1422 blkno = be32_to_cpu(node->btree[blk->index].before);
1460 blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1465 blkno = be32_to_cpu(node->btree[blk->index].before);
1550 * Add a block to the btree ahead of the file.
1653 * Ick. We need to always be able to remove a btree block, even
1655 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
1658 * a bmap btree split to do that.
1693 * Read the last block in the btree space.
1715 dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
1782 be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
1791 par_blkno = be32_to_cpu(par_node->btree[entno].before);
1804 be32_to_cpu(par_node->btree[entno].before) != last_blkno;
1834 par_node->btree[entno].before = cpu_to_be32(dead_blkno);
1836 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
1837 sizeof(par_node->btree[entno].before)));
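
The comment at lines 1653-1658 explains that when freeing a btree block directly would need a bmap btree split (and xfs_bunmapi has failed with ENOSPC), the physically last block of the btree space is copied over the dead block instead, so only the last block has to be unmapped. Lines 1782-1837 are part of that swap: the parent entry still naming the old last-block number is found and repointed, logging only its 4-byte before field. A sketch of just the fix-up step; repoint_parent() is an invented name, and the kernel's walk down to the right level and across siblings is elided.

    /* Find the parent entry that still points at the relocated block and
     * repoint it, logging only the 4 bytes that changed (lines 1834-1837). */
    static int repoint_parent(xfs_da_intnode_t *par_node,
                              uint32_t last_blkno, uint32_t dead_blkno)
    {
        int entno, count = be16_to_cpu(par_node->hdr.count);

        for (entno = 0; entno < count; entno++) {
            if (be32_to_cpu(par_node->btree[entno].before) == last_blkno) {
                par_node->btree[entno].before = cpu_to_be32(dead_blkno);
                log_range(par_node,
                          (char *)&par_node->btree[entno].before - (char *)par_node,
                          (char *)&par_node->btree[entno].before - (char *)par_node
                                + sizeof(par_node->btree[entno].before) - 1);
                return 0;
            }
        }
        return -1;   /* not found at this level: the caller moves to a sibling */
    }
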
1853 * Remove a btree block from a directory or attribute.
1895 * See if the mapping(s) for this btree block are valid, i.e.