Lines Matching defs:B_PAGE_SIZE

476 	return (areaSize / B_PAGE_SIZE + 1) / 2;
503 addr_t pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE;
518 uint32 pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE;
576 map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection,
591 map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection,
764 ssize_t pagesShifted = (oldSize - area->Size()) / B_PAGE_SIZE;
921 ssize_t secondAreaPagesShifted = secondAreaOffset / B_PAGE_SIZE;
1105 cache->GuardSize() / B_PAGE_SIZE, true, VM_PRIORITY_USER);
1335 if ((address % B_PAGE_SIZE) != 0
1372 pageAddress += B_PAGE_SIZE) {
1479 guardPages = guardSize / B_PAGE_SIZE;
1568 if (wiring == B_CONTIGUOUS && size == B_PAGE_SIZE
1621 reservedPages += size / B_PAGE_SIZE;
1640 size / B_PAGE_SIZE, physicalAddressRestrictions, priority);
1665 isStack ? (min_c(2, size / B_PAGE_SIZE - guardPages)) : 0, guardPages,
1702 address += B_PAGE_SIZE, offset += B_PAGE_SIZE) {
1706 + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
1709 - KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
1739 virtualAddress += B_PAGE_SIZE, offset += B_PAGE_SIZE) {
1747 page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
1773 = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
1780 + (area->Size() - 1); virtualAddress += B_PAGE_SIZE,
1781 offset += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
1782 page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
1820 for (i = size / B_PAGE_SIZE; i-- > 0; pageNumber++) {
1862 mapOffset = physicalAddress % B_PAGE_SIZE;
1926 for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
1974 if (vecs[i].base % B_PAGE_SIZE != 0
1975 || vecs[i].length % B_PAGE_SIZE != 0) {
2020 for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
2032 vecOffset += B_PAGE_SIZE;
2116 page_num_t firstPage = cacheOffset / B_PAGE_SIZE;
2117 page_num_t endPage = firstPage + area->Size() / B_PAGE_SIZE;
2131 baseAddress + (page->cache_offset * B_PAGE_SIZE - cacheOffset),
2154 offset = ROUNDDOWN(offset, B_PAGE_SIZE);
2448 offset += B_PAGE_SIZE) {
2607 lowerCache->GuardSize() / B_PAGE_SIZE,
2640 copiedPage->physical_page_number * B_PAGE_SIZE,
2641 page->physical_page_number * B_PAGE_SIZE);
2647 page->cache_offset * B_PAGE_SIZE);
2970 status = cache->Commit(cache->page_count * B_PAGE_SIZE,
2986 && cache->page_count * 2 < area->Size() / B_PAGE_SIZE) {
3024 page_num_t firstPageOffset = area->cache_offset / B_PAGE_SIZE;
3026 = firstPageOffset + area->Size() / B_PAGE_SIZE;
3265 int32 offset = address & (B_PAGE_SIZE - 1);
3266 if (num * itemSize + offset > B_PAGE_SIZE) {
3267 num = (B_PAGE_SIZE - offset) / itemSize;
3271 address = ROUNDDOWN(address, B_PAGE_SIZE);
3367 copyAddress = ROUNDDOWN(copyAddress, B_PAGE_SIZE);
3769 sAvailableMemory, (phys_addr_t)vm_page_num_pages() * B_PAGE_SIZE);
3824 physicalAddress = page->physical_page_number * B_PAGE_SIZE;
3827 physicalAddress -= physicalAddress % B_PAGE_SIZE;
3884 virtualAddress -= virtualAddress % B_PAGE_SIZE;
3979 for (addr_t current = start; current < end; current += B_PAGE_SIZE) {
3985 vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
4079 address = (void*)ROUNDDOWN(image->text_region.start, B_PAGE_SIZE);
4087 address = (void*)ROUNDDOWN(image->data_region.start, B_PAGE_SIZE);
4252 args->physical_allocated_range[i].size += B_PAGE_SIZE;
4253 return nextPage / B_PAGE_SIZE;
4261 nextPage = args->physical_allocated_range[i].start - B_PAGE_SIZE;
4272 args->physical_allocated_range[i].start -= B_PAGE_SIZE;
4273 args->physical_allocated_range[i].size += B_PAGE_SIZE;
4274 return nextPage / B_PAGE_SIZE;
4302 for (uint32 i = 0; i < PAGE_ALIGN(physicalSize) / B_PAGE_SIZE; i++) {
4309 arch_vm_translation_map_early_map(args, virtualBase + i * B_PAGE_SIZE,
4310 physicalAddress * B_PAGE_SIZE, attributes,
4333 sAvailableMemory = vm_page_num_pages() * B_PAGE_SIZE;
4381 address = (void*)ROUNDDOWN(heapBase, B_PAGE_SIZE);
4404 void* lastPage = (void*)ROUNDDOWN(~(addr_t)0, B_PAGE_SIZE);
4405 vm_block_address_range("overflow protection", lastPage, B_PAGE_SIZE);
4409 (void *)ROUNDDOWN(0xcccccccc, B_PAGE_SIZE), B_PAGE_SIZE * 64);
4413 (void *)ROUNDDOWN(0xdeadbeef, B_PAGE_SIZE), B_PAGE_SIZE * 64);
4426 if (vm_page_num_free_pages() >= 200 * 1024 * 1024 / B_PAGE_SIZE) {
4431 ROUNDUP(kCacheInfoTableCount * sizeof(cache_info), B_PAGE_SIZE),
4554 addr_t pageAddress = ROUNDDOWN(address, B_PAGE_SIZE);
4764 vec.base = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
4765 generic_size_t bytesRead = vec.length = B_PAGE_SIZE;
4833 vm_memcpy_physical_page(page->physical_page_number * B_PAGE_SIZE,
4834 sourcePage->physical_page_number * B_PAGE_SIZE);
4871 addr_t address = ROUNDDOWN(originalAddress, B_PAGE_SIZE);
4990 && (mappedPage = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
5014 if (area->AddWaiterIfWired(&waiter, address, B_PAGE_SIZE,
5208 if (amount >= (vm_page_num_pages() * B_PAGE_SIZE)) {
5321 info->ram_size = cache->page_count * B_PAGE_SIZE;
5330 // is newSize a multiple of B_PAGE_SIZE?
5331 if (newSize & (B_PAGE_SIZE - 1))
5452 if ((oldSize / B_PAGE_SIZE) % 2 != 0) {
5539 if (size > B_PAGE_SIZE || ROUNDDOWN((addr_t)unsafeMemory, B_PAGE_SIZE)
5540 != ROUNDDOWN((addr_t)unsafeMemory + size - 1, B_PAGE_SIZE)) {
5587 phys_addr_t physicalAddress = page->physical_page_number * B_PAGE_SIZE
5588 + (addr_t)unsafeMemory % B_PAGE_SIZE;
5701 addr_t pageAddress = ROUNDDOWN((addr_t)address, B_PAGE_SIZE);
5702 info->range.SetTo(pageAddress, B_PAGE_SIZE, writable, false);
5751 && (page = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
5783 = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE
5784 + address % B_PAGE_SIZE;
5851 addr_t lockBaseAddress = ROUNDDOWN((addr_t)address, B_PAGE_SIZE);
5852 addr_t lockEndAddress = ROUNDUP((addr_t)address + numBytes, B_PAGE_SIZE);
5928 for (; nextAddress != areaEnd; nextAddress += B_PAGE_SIZE) {
5935 && (page = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
6010 addr_t lockBaseAddress = ROUNDDOWN((addr_t)address, B_PAGE_SIZE);
6011 addr_t lockEndAddress = ROUNDUP((addr_t)address + numBytes, B_PAGE_SIZE);
6085 for (; nextAddress != areaEnd; nextAddress += B_PAGE_SIZE) {
6092 && (page = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
6155 addr_t pageOffset = virtualAddress & (B_PAGE_SIZE - 1);
6186 addr_t bytes = min_c(numBytes - offset, B_PAGE_SIZE);
6205 if (bytes > B_PAGE_SIZE - pageOffset)
6206 bytes = B_PAGE_SIZE - pageOffset;
6734 || (addr_t)address % B_PAGE_SIZE != 0) {
6763 || (addr_t)address % B_PAGE_SIZE != 0) {
6793 if ((address % B_PAGE_SIZE) != 0)
6897 pageAddress < currentAddress; pageAddress += B_PAGE_SIZE) {
6907 commitmentChange += B_PAGE_SIZE;
6909 commitmentChange -= B_PAGE_SIZE;
6922 pageAddress < currentAddress; pageAddress += B_PAGE_SIZE) {
6936 vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
6974 if ((address % B_PAGE_SIZE) != 0)
7050 if ((address % B_PAGE_SIZE) != 0)
7129 if ((address % B_PAGE_SIZE) != 0)
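
The excerpts above cycle through a small set of page-arithmetic idioms: dividing a relative address by B_PAGE_SIZE to get a page index, masking with B_PAGE_SIZE - 1 (or taking % B_PAGE_SIZE) to get the offset inside a page, ROUNDDOWN/ROUNDUP to align a byte range to whole pages, and multiplying a physical_page_number by B_PAGE_SIZE to recover a physical address. What follows is a minimal, self-contained sketch of those idioms, not kernel code: B_PAGE_SIZE, ROUNDDOWN, ROUNDUP and the addr_t/phys_addr_t/page_num_t typedefs are stand-ins defined locally (assuming the usual 4096-byte, power-of-two page), and the base/address values are made up for illustration; the real definitions come from the kernel headers.

#include <stdint.h>
#include <stdio.h>

// Stand-ins for the kernel's definitions (illustration only).
typedef uintptr_t addr_t;
typedef uint64_t phys_addr_t;
typedef uint64_t page_num_t;

#define B_PAGE_SIZE 4096
#define ROUNDDOWN(a, b) (((a) / (b)) * (b))
#define ROUNDUP(a, b) ROUNDDOWN((a) + (b) - 1, b)

int main()
{
	addr_t base = 0x10000000;        // hypothetical area base
	addr_t pageAddress = 0x10042000; // hypothetical page-aligned address

	// Page index relative to an area, as in
	// "(pageAddress - area->Base()) / B_PAGE_SIZE".
	addr_t pageIndex = (pageAddress - base) / B_PAGE_SIZE;

	// Offset within a page; equivalent to "% B_PAGE_SIZE" because the
	// page size is a power of two, as in "address & (B_PAGE_SIZE - 1)".
	addr_t address = 0x10042abc;
	addr_t pageOffset = address & (B_PAGE_SIZE - 1);

	// Align a byte range outward to whole pages, as the lock paths
	// above do with lockBaseAddress/lockEndAddress.
	size_t numBytes = 10000;
	addr_t lockBaseAddress = ROUNDDOWN(address, (addr_t)B_PAGE_SIZE);
	addr_t lockEndAddress = ROUNDUP(address + numBytes, (addr_t)B_PAGE_SIZE);

	// Physical page number to physical address and back, as in
	// "page->physical_page_number * B_PAGE_SIZE" and
	// "vm_lookup_page(physicalAddress / B_PAGE_SIZE)".
	page_num_t pageNumber = 0x1234;
	phys_addr_t physicalAddress = (phys_addr_t)pageNumber * B_PAGE_SIZE;

	printf("index %#lx offset %#lx range [%#lx, %#lx) phys %#llx\n",
		(unsigned long)pageIndex, (unsigned long)pageOffset,
		(unsigned long)lockBaseAddress, (unsigned long)lockEndAddress,
		(unsigned long long)physicalAddress);
	return 0;
}

Because the page size is a power of two, "address & (B_PAGE_SIZE - 1)" and "address % B_PAGE_SIZE" are interchangeable, which is why the listing mixes both spellings; a compiler reduces division and modulo by a constant power of two to shifts and masks either way, so the choice is stylistic rather than a performance concern.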