Searched refs:PAGE_SIZE (Results 251 - 275 of 3549) sorted by last modified time


/linux-master/net/sunrpc/
svcsock.c
  314  for (i = 0, t = 0; t < buflen; i++, t += PAGE_SIZE)
  315  bvec_set_page(&bvec[i], rqstp->rq_pages[i], PAGE_SIZE, 0);
  646  DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
  956  npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
  975  npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
  989  npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
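
The svcsock.c matches show the two usual ways kernel code turns a byte length into a page count: DIV_ROUND_UP(len, PAGE_SIZE) and the open-coded (len + PAGE_SIZE - 1) >> PAGE_SHIFT. A minimal userspace sketch of that equivalence, assuming a 4 KiB page; PAGE_SHIFT and PAGE_SIZE are defined locally here purely for illustration:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative values; the kernel derives these per architecture. */
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Same round-up division as the kernel's DIV_ROUND_UP() macro. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            for (size_t len = 0; len <= 3 * PAGE_SIZE; len += 517) {
                    size_t npages_shift = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                    size_t npages_div   = DIV_ROUND_UP(len, PAGE_SIZE);

                    /* Both forms round up to whole pages and must agree. */
                    assert(npages_shift == npages_div);
            }
            printf("page-count rounding agrees for PAGE_SIZE=%lu\n", PAGE_SIZE);
            return 0;
    }
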
/linux-master/net/rds/
rdma.c
   61  return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
  282  sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
  772  min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
/linux-master/net/bluetooth/
hci_core.c
  849  if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
/linux-master/mm/
vmalloc.c
    99  unsigned long size = PAGE_SIZE;
   110  if (size != PAGE_SIZE) {
   350  } while (pte++, addr += PAGE_SIZE, addr != end);
   504  } while (pte++, addr += PAGE_SIZE, addr != end);
   641  * @pages: pages to map (always PAGE_SIZE pages)
   681  * @pages: pages to map (always PAGE_SIZE pages)
  1479  * parameters. Please note, with an alignment bigger than PAGE_SIZE,
  1521  * that is bigger then PAGE_SIZE.
  1761  * a) align <= PAGE_SIZE, because it does not make any sense.
  1762  * All blocks(their start addresses) are at least PAGE_SIZE
  [all...]
memory.c
   349  * We add page table cache pages with PAGE_SIZE,
   352  tlb_change_page_size(tlb, PAGE_SIZE);
  1150  max_nr = (end - addr) / PAGE_SIZE;
  1171  } while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
  1465  addr += PAGE_SIZE;
  1582  tlb_change_page_size(tlb, PAGE_SIZE);
  1604  max_nr = (end - addr) / PAGE_SIZE;
  1609  addr += nr * PAGE_SIZE;
  1664  } while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
  2090  addr += PAGE_SIZE;
  [all...]
/linux-master/kernel/bpf/
syscall.c
   90  if (unlikely(actual_size > PAGE_SIZE)) /* silly large */
  300  } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
/linux-master/io_uring/
kbuf.c
   18  #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
/linux-master/init/
initramfs.c
  604  start = round_down(phys_initrd_start, PAGE_SIZE);
  606  size = round_up(size, PAGE_SIZE);
  636  unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
  637  unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
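
The initramfs.c lines handle the initrd region on whole-page boundaries by widening the byte range: round_down/ALIGN_DOWN for the start and round_up/ALIGN for the end. A small sketch of that widening, assuming PAGE_SIZE is a power of two; ALIGN_UP below is a stand-in name for the kernel's ALIGN():

    #include <stdio.h>

    #define PAGE_SIZE 4096UL  /* illustrative; a power of two, as in the kernel */

    /* Modeled on the kernel's ALIGN_DOWN()/ALIGN() for power-of-two sizes. */
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long start = 0x1234567;          /* arbitrary region start */
            unsigned long end   = start + 3000000;    /* arbitrary region end   */

            /* Widen to whole pages so the region can be handled page by page. */
            unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
            unsigned long aligned_end   = ALIGN_UP(end, PAGE_SIZE);

            printf("[%#lx, %#lx) -> [%#lx, %#lx), %lu pages\n",
                   start, end, aligned_start, aligned_end,
                   (aligned_end - aligned_start) / PAGE_SIZE);
            return 0;
    }
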
/linux-master/include/net/mana/
mana.h
   45  #define EQ_SIZE (8 * PAGE_SIZE)
  301  #define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
/linux-master/include/linux/
fs.h
  3467  #define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
/linux-master/fs/xfs/
xfs_super.c
   341  if (mp->m_super->s_blocksize != PAGE_SIZE) {
  1629  if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
  1633  mp->m_sb.sb_blocksize, PAGE_SIZE);
xfs_buf.c
    87  return (bp->b_page_count * PAGE_SIZE);
   375  bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
   642  } else if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
  1539  while (offset >= PAGE_SIZE) {
  1541  offset -= PAGE_SIZE;
  1562  int rbytes, nbytes = PAGE_SIZE - offset;
  1792  return page_address(page) + (offset & (PAGE_SIZE-1));
  1811  csize = min_t(size_t, PAGE_SIZE - page_offset,
  1814  ASSERT((csize + page_offset) <= PAGE_SIZE);
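
The xfs_buf.c matches walk a byte offset across an array of pages: the page index comes from offset >> PAGE_SHIFT (or repeated subtraction of PAGE_SIZE), the in-page offset from offset & (PAGE_SIZE - 1), and each chunk is capped at PAGE_SIZE - page_offset. A userspace sketch of that chunking loop; buf_copy_chunks and min_sz are illustrative names, not XFS helpers:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                  /* illustrative 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    #define min_sz(a, b) ((a) < (b) ? (a) : (b))

    /* Walk [offset, offset + count) of a paged buffer in per-page chunks. */
    static void buf_copy_chunks(size_t offset, size_t count)
    {
            while (count > 0) {
                    size_t page_index  = offset >> PAGE_SHIFT;
                    size_t page_offset = offset & (PAGE_SIZE - 1);
                    size_t csize = min_sz(PAGE_SIZE - page_offset, count);

                    /* A real implementation would memcpy csize bytes here. */
                    printf("page %zu, offset %zu, %zu bytes\n",
                           page_index, page_offset, csize);

                    offset += csize;
                    count  -= csize;
            }
    }

    int main(void)
    {
            buf_copy_chunks(5000, 10000);  /* spans three pages */
            return 0;
    }
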
/linux-master/fs/smb/client/
file.c
    49  XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
    53  end = (start + len - 1) / PAGE_SIZE;
    77  XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
    84  end = (start + len - 1) / PAGE_SIZE;
   110  XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
   117  end = (start + len - 1) / PAGE_SIZE;
   143  XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
   150  end = (start + len - 1) / PAGE_SIZE;
  1570  PAGE_SIZE);
  1572  PAGE_SIZE);
  [all...]
/linux-master/fs/romfs/
super.c
  122  fillsize = size > PAGE_SIZE ? PAGE_SIZE : size;
  134  if (fillsize < PAGE_SIZE)
  135  memset(buf + fillsize, 0, PAGE_SIZE - fillsize);
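
The romfs read path copies at most one page from the backing store and zero-fills the tail when less than a page of data remains. A minimal sketch of that clamp-and-pad pattern; fill_page is a hypothetical stand-in for the romfs copy routine:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL  /* illustrative */

    /* Clamp the copy to one page and zero the rest of the destination page. */
    static void fill_page(char *buf, const char *src, unsigned long size)
    {
            unsigned long fillsize = size > PAGE_SIZE ? PAGE_SIZE : size;

            memcpy(buf, src, fillsize);
            if (fillsize < PAGE_SIZE)
                    memset(buf + fillsize, 0, PAGE_SIZE - fillsize);
    }

    int main(void)
    {
            static char page[PAGE_SIZE];
            static char data[100] = "short file tail";

            fill_page(page, data, sizeof(data));
            printf("copied %zu bytes, zero-padded %lu\n",
                   sizeof(data), (unsigned long)(PAGE_SIZE - sizeof(data)));
            return 0;
    }
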
/linux-master/fs/nfsd/
nfs4state.c
  1980  > PAGE_SIZE);
/linux-master/fs/jfs/
jfs_logmgr.c
  1821  for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) {
/linux-master/fs/f2fs/
super.c
  4897  if (PAGE_SIZE != F2FS_BLKSIZE) {
  4898  printk("F2FS not supported on PAGE_SIZE(%lu) != BLOCK_SIZE(%lu)\n",
  4899  PAGE_SIZE, F2FS_BLKSIZE);
/linux-master/fs/ext4/
super.c
  4227  memset(buf, 0, PAGE_SIZE);
  4403  if (sb->s_blocksize <= PAGE_SIZE)
  4725  if (sb->s_blocksize == PAGE_SIZE)
/linux-master/fs/cramfs/
inode.c
  155  * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
  173  #define BUFFER_SIZE (BLKS_PER_BUF*PAGE_SIZE)
  196  offset &= PAGE_SIZE - 1;
  242  memcpy_from_page(data, page, 0, PAGE_SIZE);
  245  memset(data, 0, PAGE_SIZE);
  246  data += PAGE_SIZE;
  303  u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
  343  return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
  369  max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
  403  pages * PAGE_SIZE, vm
  [all...]
/linux-master/fs/bcachefs/
util.c
  637  unsigned len = min_t(size_t, PAGE_SIZE - offset, size);
  649  unsigned len = min_t(size_t, PAGE_SIZE, size);
/linux-master/fs/
aio.c
  269  inode->i_size = PAGE_SIZE * nr_pages;
  516  nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
  549  ctx->mmap_size = nr_pages * PAGE_SIZE;
  586  #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
  587  #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
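
aio.c sizes its completion ring by how many struct io_event entries fit per page, with the first page losing space to the struct aio_ring header. A sketch of that capacity calculation; the struct layouts below are placeholders, not the real uapi definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL  /* illustrative */

    /* Placeholder layouts; the real definitions live in the aio headers. */
    struct io_event_stub { uint64_t data, obj; int64_t res, res2; };
    struct aio_ring_stub { uint32_t id, nr, head, tail, magic, hdr[3]; };

    #define EVENTS_PER_PAGE   (PAGE_SIZE / sizeof(struct io_event_stub))
    #define EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring_stub)) / \
                               sizeof(struct io_event_stub))

    int main(void)
    {
            unsigned long nr_pages = 4;

            /* The first page holds the ring header, so it fits fewer events. */
            unsigned long nr_events = EVENTS_FIRST_PAGE +
                                      (nr_pages - 1) * EVENTS_PER_PAGE;

            printf("%lu pages -> %lu events (%lu per full page)\n",
                   nr_pages, nr_events, (unsigned long)EVENTS_PER_PAGE);
            return 0;
    }
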
/linux-master/drivers/scsi/
myrs.c
  2308  if (mmio_size < PAGE_SIZE)
  2309  mmio_size = PAGE_SIZE;
myrb.c
  3434  if (mmio_size < PAGE_SIZE)
  3435  mmio_size = PAGE_SIZE;
/linux-master/drivers/net/
xen-netfront.c
   287  skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
   762  if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
   789  if (offset == PAGE_SIZE) {
  1186  rx.offset, rx.status, PAGE_SIZE);
/linux-master/drivers/net/ethernet/microsoft/mana/
mana_en.c
   635  if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
  1615  } else if (rxq->alloc_size > PAGE_SIZE) {

Completed in 441 milliseconds
