/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_bo.h
    126: void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
    127: void vmw_bo_unmap(struct vmw_bo *vbo);
    139: * @vbo: The struct vmw_bo
    141: static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)  [argument]
    143: int i = ARRAY_SIZE(vbo->res_prios);
    146: if (vbo->res_prios[i]) {
    147: vbo->tbo.priority = i;
    152: vbo->tbo.priority = 3;
    158: * @vbo: The struct vmw_bo
    164: static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)  [argument]
    179: vmw_bo_prio_del(struct vmw_bo *vbo, int prio)  [argument]
    200: vmw_user_bo_ref(struct vmw_bo *vbo)  [argument]
    ...
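The vmw_bo_prio_* fragments above sketch a small priority-bookkeeping scheme: res_prios[] counts users per priority level, and the effective TTM priority is recomputed as the highest level still in use, falling back to a default when none are. A minimal, self-contained C sketch of that pattern; only the scan order and the default value 3 come from the fragments, the demo_ names and everything else are illustrative:

    #include <stdio.h>

    #define NUM_PRIOS 4
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct demo_bo {
        unsigned int res_prios[NUM_PRIOS]; /* use count per priority */
        unsigned int priority;             /* effective priority */
    };

    /* Recompute the effective priority: the highest priority with at
     * least one user wins; with no users, fall back to the default. */
    static void demo_prio_adjust(struct demo_bo *bo)
    {
        int i = ARRAY_SIZE(bo->res_prios);

        while (i--) {
            if (bo->res_prios[i]) {
                bo->priority = i;
                return;
            }
        }
        bo->priority = 3; /* default, as in the fragment at line 152 */
    }

    static void demo_prio_add(struct demo_bo *bo, int prio)
    {
        bo->res_prios[prio]++;
        demo_prio_adjust(bo);
    }

    static void demo_prio_del(struct demo_bo *bo, int prio)
    {
        bo->res_prios[prio]--;
        demo_prio_adjust(bo);
    }

    int main(void)
    {
        struct demo_bo bo = { {0}, 0 };

        demo_prio_add(&bo, 1);
        demo_prio_add(&bo, 2);
        demo_prio_del(&bo, 2);
        printf("effective priority: %u\n", bo.priority); /* prints 1 */
        return 0;
    }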
vmwgfx_page_dirty.c
    76: * @vbo: The buffer object to scan
    82: static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo)  [argument]
    84: struct vmw_bo_dirty *dirty = vbo->dirty;
    85: pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
    86: struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
    113: * @vbo: The buffer object to scan
    120: static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo)  [argument]
    122: struct vmw_bo_dirty *dirty = vbo->dirty;
    123: unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
    124: struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
    163: vmw_bo_dirty_scan(struct vmw_bo *vbo)  [argument]
    184: vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo, pgoff_t start, pgoff_t end)  [argument]
    209: vmw_bo_dirty_unmap(struct vmw_bo *vbo, pgoff_t start, pgoff_t end)  [argument]
    230: vmw_bo_dirty_add(struct vmw_bo *vbo)  [argument]
    287: vmw_bo_dirty_release(struct vmw_bo *vbo)  [argument]
    309: struct vmw_bo *vbo = res->guest_memory_bo;  [local]
    356: struct vmw_bo *vbo = res->guest_memory_bo;  [local]
    383: struct vmw_bo *vbo = to_vmw_bo(&bo->base);  [local]
    421: struct vmw_bo *vbo = to_vmw_bo(&bo->base);  [local]
    ...
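The dirty-tracking helpers above take (start, end) page ranges, suggesting interval-style bookkeeping of which pages of a mapping were written. A toy sketch of such a tracker, assuming a single [start, end) page interval per buffer; the real driver's vmw_bo_dirty structure is richer, and the demo_ names are illustrative:

    #include <stdio.h>

    typedef unsigned long pgoff_t;

    struct demo_dirty {
        pgoff_t start; /* first dirty page */
        pgoff_t end;   /* one past the last dirty page */
    };

    /* Grow the tracked interval to cover a newly dirtied page. */
    static void demo_dirty_record(struct demo_dirty *d, pgoff_t page)
    {
        if (d->start >= d->end) {  /* empty range: start fresh */
            d->start = page;
            d->end = page + 1;
            return;
        }
        if (page < d->start)
            d->start = page;
        if (page + 1 > d->end)
            d->end = page + 1;
    }

    int main(void)
    {
        struct demo_dirty d = { 1, 0 }; /* start >= end means empty */

        demo_dirty_record(&d, 7);
        demo_dirty_record(&d, 3);
        printf("dirty pages: [%lu, %lu)\n", d.start, d.end); /* [3, 8) */
        return 0;
    }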
vmwgfx_bo.c
    35: static void vmw_bo_release(struct vmw_bo *vbo)  [argument]
    37: WARN_ON(vbo->tbo.base.funcs &&
    38:         kref_read(&vbo->tbo.base.refcount) != 0);
    39: vmw_bo_unmap(vbo);
    40: drm_gem_object_release(&vbo->tbo.base);
    50: struct vmw_bo *vbo = to_vmw_bo(&bo->base);  [local]
    52: WARN_ON(vbo->dirty);
    53: WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
    54: vmw_bo_release(vbo);
    55: kfree(vbo);
    278: vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)  [argument]
    326: vmw_bo_map_and_cache(struct vmw_bo *vbo)  [argument]
    353: vmw_bo_unmap(struct vmw_bo *vbo)  [argument]
    527: struct vmw_bo *vbo;  [local]
    674: struct vmw_bo *vbo;  [local]
    727: struct vmw_bo *vbo = to_vmw_bo(&bo->base);  [local]
    ...
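vmw_bo_map_and_cache() and vmw_bo_unmap() above follow a map-and-cache idiom: create the CPU mapping lazily on first use, hand back the cached pointer on later calls, and tear it down once when the object is released (vmw_bo_release() calls vmw_bo_unmap() before freeing). A userspace stand-in for the idiom, with malloc/free standing in for the real kernel mapping calls and demo_ names being illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_bo {
        size_t size;
        void *map; /* cached CPU mapping, NULL until first use */
    };

    static void *demo_bo_map_and_cache(struct demo_bo *bo)
    {
        if (bo->map)                /* already mapped: reuse */
            return bo->map;
        bo->map = malloc(bo->size); /* stands in for a real kmap */
        return bo->map;
    }

    static void demo_bo_unmap(struct demo_bo *bo)
    {
        free(bo->map);
        bo->map = NULL;
    }

    int main(void)
    {
        struct demo_bo bo = { 4096, NULL };

        void *a = demo_bo_map_and_cache(&bo);
        void *b = demo_bo_map_and_cache(&bo); /* same pointer, no remap */
        printf("cached: %s\n", a == b ? "yes" : "no");
        demo_bo_unmap(&bo); /* as release does before freeing */
        return 0;
    }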
vmwgfx_validation.c
    168: * @vbo: The buffer object to search for.
    174: vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, struct vmw_bo *vbo)  [argument]
    184: unsigned long key = (unsigned long) vbo;
    196: if (entry->base.bo == &vbo->tbo) {
    259: * @vbo: The buffer object.
    263: vmw_validation_add_bo(struct vmw_validation_context *ctx, struct vmw_bo *vbo)  [argument]
    268: bo_node = vmw_validation_find_bo_dup(ctx, vbo);
    277: bo_node->hash.key = (unsigned long) vbo;
    282: val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
    394: * @vbo ...
    398: vmw_validation_res_switch_backup(struct vmw_validation_context *ctx, void *val_private, struct vmw_bo *vbo, unsigned long guest_memory_offset)  [argument]
    441: struct vmw_bo *vbo = res->guest_memory_bo;  [local]
    515: struct vmw_bo *vbo = to_vmw_bo(&bo->base);  [local]
    556: struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);  [local]
    618: struct vmw_bo *vbo = res->guest_memory_bo;  [local]
    856: struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);  [local]
    ...
vmwgfx_gem.c
    54: struct vmw_bo *vbo = to_vmw_bo(obj);  [local]
    61: vmw_bo_pin_reserved(vbo, do_pin);
    159: struct vmw_bo *vbo;  [local]
    173: ret = vmw_bo_create(dev_priv, &params, &vbo);
    177: vbo->tbo.base.funcs = &vmw_gem_object_funcs;
    179: gem = &vbo->tbo.base;
    193: struct vmw_bo *vbo;  [local]
    198: req->size, &handle, &vbo);
    203: rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node);
    207: drm_gem_object_put(&vbo->tbo.base);
    ...
vmwgfx_resource.c
    743: * @vbo: Pointer to the current backing MOB.
    751: void vmw_resource_unbind_list(struct vmw_bo *vbo)  [argument]
    754: .bo = &vbo->tbo,
    758: dma_resv_assert_held(vbo->tbo.base.resv);
    759: while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
    760: struct rb_node *node = vbo->res_tree.rb_node;
    772: (void) ttm_bo_wait(&vbo->tbo, false, false);
    969: struct vmw_bo *vbo = NULL;  [local]
    972: vbo = res->guest_memory_bo;
    974: ret = ttm_bo_reserve(&vbo->tbo, ...
    1030: struct vmw_bo *vbo = res->guest_memory_bo;  [local]
    1075: vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start, pgoff_t end, pgoff_t *num_prefault)  [argument]
    ...
vmwgfx_validation.h
    162: struct vmw_bo *vbo);
    178: struct vmw_bo *vbo,
vmwgfx_ttm_buffer.c
    568: struct vmw_bo *vbo;  [local]
    578: ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
    582: ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
    584: ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
    587: container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);
    591: ttm_bo_unreserve(&vbo->tbo);
    594: *bo_p = vbo;
vmwgfx_drv.c
    393: struct vmw_bo *vbo;  [local]
    406: * Create the vbo as pinned, so that a tryreserve will
    410: ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
    414: ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
    416: vmw_bo_pin_reserved(vbo, true);
    418: ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
    426: vmw_bo_pin_reserved(vbo, false);
    427: ttm_bo_unreserve(&vbo->tbo);
    431: vmw_bo_unreference(&vbo);
    433: dev_priv->dummy_query_bo = vbo;
    ...
vmwgfx_drv.h
    834: void vmw_resource_unbind_list(struct vmw_bo *vbo);
    839: int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
    1401: void vmw_bo_dirty_scan(struct vmw_bo *vbo);
    1402: int vmw_bo_dirty_add(struct vmw_bo *vbo);
    1405: void vmw_bo_dirty_release(struct vmw_bo *vbo);
    1406: void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
vmwgfx_kms.c
    225: static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)  [argument]
    227: if (!(*vbo))
    230: ttm_bo_unpin(&(*vbo)->tbo);
    231: vmw_bo_unreference(vbo);
    624: struct vmw_bo *vbo = vps->cursor.bo;  [local]
    626: if (!vbo || !vbo->map.virtual)
    629: ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
    631: vmw_bo_unmap(vbo);
    632: ttm_bo_unreserve(&vbo->tbo);
    ...
vmwgfx_execbuf.c
    62: * @vbo: Non ref-counted pointer to buffer object
    68: struct vmw_bo *vbo;  [member of struct vmw_relocation]
    1177: reloc->vbo = vmw_bo;
    1234: reloc->vbo = vmw_bo;
    1713: struct vmw_bo *vbo;  [local]
    1721: ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
    1725: vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
    3767: bo = &reloc->vbo->tbo;
/linux-master/fs/ntfs3/
dir.c
    344: ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, const struct INDEX_HDR *hdr, u64 vbo, u64 pos, u8 *name, struct dir_context *ctx)  [argument]
    367: if (vbo + off < pos)
    373: ctx->pos = vbo + off;
    392: u64 vbo;  [local]
    460: vbo = (u64)bit << index_bits;
    461: if (vbo >= i_size) {
    475: vbo = (u64)bit << index_bits;
    476: if (vbo >= i_size) {
    488: vbo + sbi->record_size, pos, name, ctx);
attrib.c
    1228: u64 vbo;  [local]
    1239: vbo = page->index << PAGE_SHIFT;
    1241: if (vbo < data_size) {
    1244: u32 use = data_size - vbo;
    1249: memcpy(kaddr, data + vbo, use);
    1264: u64 vbo;  [local]
    1278: vbo = page->index << PAGE_SHIFT;
    1280: if (vbo < data_size) {
    1283: u32 use = data_size - vbo;
    1287: memcpy(data + vbo, kaddr, use);
    1377: u64 vbo[2], off[2], wof_size;  [local]
    1847: attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)  [argument]
    2105: attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)  [argument]
    2323: attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)  [argument]
    ...
file.c
    176: * It zeroes a range [vbo, vbo_to).
    178: static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)  [argument]
    183: pgoff_t idx = vbo >> PAGE_SHIFT;
    184: u32 from = vbo & (PAGE_SIZE - 1);
    428: * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
    432: static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)  [argument]
    439: loff_t end = vbo + len;
    440: loff_t vbo_down = round_down(vbo, max_t(unsigned long,
    502: err = attr_punch_hole(ni, vbo, len, &frame_size);
    513: vbo_a = (vbo ...
    ...
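ntfs_zero_range() above splits a byte range into page-sized pieces with idx = vbo >> PAGE_SHIFT selecting the page and from = vbo & (PAGE_SIZE - 1) the offset inside it. The arithmetic, extracted into a runnable demo; only the index/offset math comes from the fragment, the walk and printout are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Print the per-page chunks covering [vbo, vbo_to). */
    static void zero_range_walk(uint64_t vbo, uint64_t vbo_to)
    {
        while (vbo < vbo_to) {
            uint64_t idx = vbo >> PAGE_SHIFT;      /* page index */
            uint32_t from = vbo & (PAGE_SIZE - 1); /* offset in page */
            uint64_t chunk = PAGE_SIZE - from;

            if (chunk > vbo_to - vbo)
                chunk = vbo_to - vbo;
            printf("zero page %llu: [%u, %llu)\n",
                   (unsigned long long)idx, from,
                   (unsigned long long)(from + chunk));
            vbo += chunk;
        }
    }

    int main(void)
    {
        zero_range_walk(4000, 9000); /* spans pages 0, 1 and 2 */
        return 0;
    }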
fsntfs.c
    774: u64 vbo;  [local]
    786: vbo = (u64)from * rs;
    787: for (; from < to; from++, vbo += rs) {
    790: err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
    1127: ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo, const void *buf, size_t bytes, int sync)  [argument]
    1132: u32 off = vbo & sbi->cluster_mask;
    1133: CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
    1175: ntfs_bread_run(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo)  [argument]
    1183: if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
    1186: lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
    1191: ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)  [argument]
    1309: ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo, struct NTFS_RECORD_HEADER *rhdr, u32 bytes, struct ntfs_buffers *nb)  [argument]
    1320: ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo, u32 bytes, struct ntfs_buffers *nb)  [argument]
    1497: ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run, struct page **pages, u32 nr_pages, u64 vbo, u32 bytes, enum req_op op)  [argument]
    1666: ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo, u64 *lbo, u64 *bytes)  [argument]
    ...
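ntfs_vbo_to_lbo() and line 1186 above show the core virtual-to-logical byte offset translation: resolve the virtual cluster vbo >> cluster_bits to a logical cluster through the run list, then add back the in-cluster offset. A self-contained sketch with a toy run list; run_lookup() stands in for the kernel's run_lookup_entry(), and the mapping it returns is invented for the demo:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define CLUSTER_BITS 12
    #define CLUSTER_MASK ((1ULL << CLUSTER_BITS) - 1)

    /* Toy run list: virtual cluster n maps to logical cluster n + 100. */
    static bool run_lookup(uint64_t vcn, uint64_t *lcn)
    {
        *lcn = vcn + 100;
        return true;
    }

    /* Translate a virtual byte offset to a logical byte offset. */
    static bool vbo_to_lbo(uint64_t vbo, uint64_t *lbo)
    {
        uint64_t lcn;

        if (!run_lookup(vbo >> CLUSTER_BITS, &lcn))
            return false; /* hole or missing mapping */
        *lbo = (lcn << CLUSTER_BITS) + (vbo & CLUSTER_MASK);
        return true;
    }

    int main(void)
    {
        uint64_t lbo;

        if (vbo_to_lbo(0x5123, &lbo))
            printf("vbo 0x5123 -> lbo 0x%llx\n",
                   (unsigned long long)lbo); /* 0x69123 */
        return 0;
    }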
fslog.c
    892: static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)  [argument]
    901: if (vbo >= bytes) {
    906: u32 bytes2idx = vbo - bytes;
    918: e = Add2Ptr(rt, vbo);
    930: if (off == vbo) {
    952: if (off == vbo) {
    982: u32 vbo;  [member of struct restart_info]
    1062: u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3);  [local]
    1064: return vbo;
    1123: static int read_log_page(struct ntfs_log *log, u32 vbo,  [argument]
    1187: u32 skip, vbo;  [local]
    1449: u32 vbo = lsn_to_vbo(log, this_lsn);  [local]
    1544: check_subseq_log_page(struct ntfs_log *log, const struct RECORD_PAGE_HDR *rp, u32 vbo, u64 seq)  [argument]
    2236: u32 vbo = lsn_to_vbo(log, lsn) & ~log->page_mask;  [local]
    3042: u64 vbo = cbo + tvo;  [local]
    3741: u32 vbo, tail, off, dlen;  [local]
    ...
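Line 1062 above packs the lsn-to-vbo conversion into two shifts: an LSN carries a sequence number in its top seq_num_bits and a log-file offset in 8-byte units below it, so shifting left by seq_num_bits discards the sequence number and shifting right by seq_num_bits - 3 lands the offset already multiplied by 8. A runnable check of that bit math; the seq_num_bits value and the LSN layout built in main() are assumptions for the demo:

    #include <stdio.h>
    #include <stdint.h>

    /* Drop the sequence bits, scale the 8-byte-unit offset by 8. */
    static uint64_t lsn_to_vbo(uint64_t lsn, unsigned int seq_num_bits)
    {
        return (lsn << seq_num_bits) >> (seq_num_bits - 3);
    }

    int main(void)
    {
        unsigned int seq_num_bits = 48;
        uint64_t seq = 5;
        uint64_t off_units = 0x20; /* offset = 0x20 * 8 = 0x100 */
        /* Sequence number in the top bits, offset units below. */
        uint64_t lsn = (seq << (64 - seq_num_bits)) | off_units;

        printf("vbo = 0x%llx\n",
               (unsigned long long)lsn_to_vbo(lsn, seq_num_bits)); /* 0x100 */
        return 0;
    }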
record.c
    121: u64 vbo = (u64)mi->rno << sbi->record_bits;  [local]
    133: err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
    152: vbo >> sbi->cluster_bits);
    162: err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
    406: u64 vbo = (u64)rno << sbi->record_bits;  [local]
    447: err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
frecord.c
    948: u64 vbo;  [local]
    979: vbo = is_mft_data ? ((u64)svcn << sbi->cluster_bits) : 0;
    995: vbo <= ((u64)mi->rno << sbi->record_bits))) {
    1042: if (is_mft_data && vbo <= ((u64)rno << sbi->record_bits)) {
    1905: ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, __u64 vbo, __u64 len)  [argument]
    1914: CLST vcn = vbo >> cluster_bits;
    1956: end = vbo + len;
    1963: while (vbo < end) {
    2004: vbo = (u64)vcn << cluster_bits;
    2024: vbo ...
    2094: u64 frame_vbo, vbo = (u64)index << PAGE_SHIFT;  [local]
    2184: u64 vbo;  [local]
    ...
index.c
    208: size_t data_size, valid_size, vbo, off = bit >> 3;  [local]
    263: vbo = off & ~(size_t)sbi->block_mask;
    265: bbuf->new_valid = vbo + blocksize;
    271: if (vbo >= valid_size) {
    273: } else if (vbo + blocksize > valid_size) {
    375: size_t vbo = from >> 3;  [local]
    376: sector_t blk = (vbo & sbi->cluster_mask) >> sb->s_blocksize_bits;
    377: sector_t vblock = vbo >> sb->s_blocksize_bits;
    390: vcn = vbo >> sbi->cluster_bits;
    422: vbo ...
    940: u64 vbo = (u64)vbn << indx->vbn2vbo_bits;  [local]
    1044: u64 vbo = (u64)vbn << indx->vbn2vbo_bits;  [local]
    ...
ntfs_fs.h
    452: int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
    453: int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
    454: int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);
    565: __u64 vbo, __u64 len);
    621: u64 vbo, const void *buf, size_t bytes, int sync);
    623: const struct runs_tree *run, u64 vbo);
    625: u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb);
    626: int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
    629: int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
    634: struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
    ...
bitmap.c
    508: size_t wpos, wbit, iw, vbo;  [local]
    517: vbo = 0;
    528: vbo * 8 - prev_tail,
    543: u32 off = vbo & sbi->cluster_mask;
    545: if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
    569: wbit = vbo * 8;
    613: vbo += blocksize;
    682: size_t vbo;  [local]
    690: vbo = (u64)iw << sb->s_blocksize_bits;
    692: if (!run_lookup_entry(&wnd->run, vbo >> sb ...
    1374: u64 vbo, lbo, bytes;  [local]
    ...
inode.c
    558: static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,  [argument]
    589: vcn = vbo >> cluster_bits;
    590: off = vbo & sbi->cluster_mask;
    625: if (vbo >= valid)
    632: if (vbo >= valid)
    635: if (vbo + bytes > valid) {
    636: ni->i_valid = vbo + bytes;
    639: } else if (vbo >= valid) {
    642: } else if (vbo + bytes <= valid) {
    644: } else if (vbo ...
    784: loff_t vbo = iocb->ki_pos;  [local]
    ...
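ntfs_get_block_vbo() above (lines 625-644) branches on ni->i_valid, the boundary up to which file data has actually been written: reads wholly at or past it need no disk I/O because the tail reads as zeros, and writes past it push the boundary forward (line 636). A toy model of that valid-size logic; the demo_ names and the simplified read predicate are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    struct demo_inode {
        uint64_t i_valid; /* bytes of the file actually written so far */
    };

    /* A read starting at or past i_valid is known to be all zeros. */
    static int demo_read_needs_io(const struct demo_inode *ni, uint64_t vbo)
    {
        return vbo < ni->i_valid;
    }

    /* A write past i_valid extends it, as at line 636. */
    static void demo_write(struct demo_inode *ni, uint64_t vbo, uint64_t bytes)
    {
        if (vbo + bytes > ni->i_valid)
            ni->i_valid = vbo + bytes;
    }

    int main(void)
    {
        struct demo_inode ni = { 4096 };

        printf("read @8192 needs I/O: %d\n",
               demo_read_needs_io(&ni, 8192)); /* 0: zeros, no I/O */
        demo_write(&ni, 8192, 512);
        printf("i_valid now: %llu\n", (unsigned long long)ni.i_valid);
        return 0;
    }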
/linux-master/drivers/gpu/drm/imx/ipuv3/
ipuv3-plane.c
    377: unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba;  [local]
    466: vbo = drm_plane_state_to_vbo(new_state);
    468: if (vbo & 0x7 || vbo > 0xfffff8)
    473: if (vbo != old_vbo)
    586: unsigned long eba, ubo, vbo;  [local]
    710: vbo = drm_plane_state_to_vbo(new_state);
    714: swap(ubo, vbo);
    717: fb->pitches[1], ubo, vbo);
    720: "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
    ...
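Line 468 above rejects chroma-plane base offsets that are not 8-byte aligned or that exceed 0xfffff8, i.e. values that would not fit the IPU's bounded, 8-byte-granular U/V offset field. The check, isolated into a small demo; the constants come from the fragment, the helper name is illustrative:

    #include <stdio.h>

    static int uv_offset_valid(unsigned long vbo)
    {
        if (vbo & 0x7)       /* must be a multiple of 8 */
            return 0;
        if (vbo > 0xfffff8)  /* must fit the hardware field */
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               uv_offset_valid(0x1000),     /* 1: aligned, in range */
               uv_offset_valid(0x1001),     /* 0: unaligned */
               uv_offset_valid(0x1000000)); /* 0: out of range */
        return 0;
    }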
/linux-master/drivers/gpu/drm/vc4/
vc4_validate.c
    899: struct drm_gem_dma_object *vbo =  [local]
    908: to_vc4_bo(&vbo->base)->write_seqno);
    913: if (vbo->base.size < offset ||
    914: vbo->base.size - offset < attr_size) {
    916: offset, attr_size, vbo->base.size);
    921: max_index = ((vbo->base.size - offset - attr_size) /
    931: *(uint32_t *)(pkt_v + o) = vbo->dma_addr + offset;
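Lines 913-914 above use the overflow-safe form of a bounds check: testing size < offset || size - offset < attr_size never computes offset + attr_size, so an attacker-supplied offset cannot wrap the addition and slip past the check. A demo of why the naive form fails, using 32-bit sizes as a stand-in for the driver's types:

    #include <stdio.h>
    #include <stdint.h>

    /* Never computes offset + attr_size, so it cannot wrap. */
    static int range_fits(uint32_t size, uint32_t offset, uint32_t attr_size)
    {
        return !(size < offset || size - offset < attr_size);
    }

    int main(void)
    {
        /* Naively, 0xffffffff + 8 wraps to 7, and 7 > 4096 is false,
         * so "offset + attr_size > size" would wrongly accept this. */
        printf("%d\n", range_fits(4096, 0xffffffffu, 8)); /* 0: rejected */
        printf("%d\n", range_fits(4096, 4000, 96));       /* 1: fits */
        return 0;
    }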