Searched refs:pbn (Results 1 - 25 of 44) sorted by relevance


/linux-master/fs/nilfs2/
gcinode.c    41 * @pbn - physical block number of the block
46 * specified by @pbn to the GC pagecache with the key @blkoff.
47 * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer.
56 * %-ENOENT - The block specified with @pbn does not exist.
59 sector_t pbn, __u64 vbn,
72 if (pbn == 0) {
75 err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
90 bh->b_blocknr = pbn;
111 * @pbn - physical block number for the block
116 * specified by @vbn to the GC pagecache. @pbn ca
58 nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) argument
126 nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) argument
[all...]
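
The gcinode.c hits above describe the GC read path's fallback: when the caller passes pbn == 0, the virtual block number is translated through the DAT to a physical block number, which then lands in bh->b_blocknr. A minimal userspace sketch of that fallback, with translate_vbn() as a toy stand-in for nilfs_dat_translate() and the actual buffer I/O omitted:

#include <stdint.h>
#include <errno.h>

typedef uint64_t sector_t;

/* Toy stand-in for nilfs_dat_translate(): map a virtual block number to a
 * physical one; returns 0 on success, -ENOENT if the block is unmapped. */
static int translate_vbn(uint64_t vbn, sector_t *pbn_out)
{
    if (vbn == 0)
        return -ENOENT;            /* pretend vbn 0 is unmapped */
    *pbn_out = 1000 + vbn;         /* toy mapping: virtual block N -> physical 1000 + N */
    return 0;
}

static int submit_read_sketch(sector_t pbn, uint64_t vbn, sector_t *blocknr_out)
{
    if (pbn == 0) {
        /* Caller only knows the virtual block number: ask the DAT. */
        int err = translate_vbn(vbn, &pbn);
        if (err)
            return err;            /* e.g. -ENOENT: the block does not exist */
    }

    *blocknr_out = pbn;            /* corresponds to bh->b_blocknr = pbn in the hit */
    return 0;
}
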
/linux-master/drivers/md/dm-vdo/
repair.c    105 physical_block_number_t pbn; member in struct:repair_completion
145 if (mapping1->block_map_slot.pbn != mapping2->block_map_slot.pbn)
146 return mapping1->block_map_slot.pbn < mapping2->block_map_slot.pbn;
435 if (mapping.pbn == VDO_ZERO_BLOCK)
438 if (!vdo_is_physical_data_block(depot, mapping.pbn)) {
447 result = vdo_adjust_reference_count_for_rebuild(depot, mapping.pbn,
455 slot, (unsigned long long) mapping.pbn);
482 if (vdo_get_block_map_page_pbn(page) == repair->last_slot.pbn) {
517 physical_block_number_t pbn = VDO_ZERO_BLOCK; local
544 physical_block_number_t pbn = get_pbn_to_fetch(repair, block_map); local
611 process_entry(physical_block_number_t pbn, struct vdo_completion *completion) argument
810 physical_block_number_t pbn; local
1028 physical_block_number_t pbn; local
1164 physical_block_number_t pbn = local
1694 physical_block_number_t pbn = journal->origin; local
[all...]
physical-zone.h    100 physical_block_number_t pbn);
103 physical_block_number_t pbn,
dump.c    162 vdo_log_info(" %s is locked. Waited on by: vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
163 wait_on, data_vio, data_vio->allocation.pbn, data_vio->logical.lbn,
164 data_vio->duplicate.pbn, get_data_vio_operation_name(data_vio));
168 vdo_log_info(" ... and : vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
169 data_vio, data_vio->allocation.pbn, data_vio->logical.lbn,
170 data_vio->duplicate.pbn,
239 data_vio->allocation.pbn, data_vio->logical.lbn,
240 data_vio->duplicate.pbn);
[all...]
slab-depot.h    205 * (254) the reference count is stored in counters[pbn].
507 physical_block_number_t pbn,
514 physical_block_number_t pbn,
524 physical_block_number_t pbn,
537 physical_block_number_t pbn);
555 physical_block_number_t pbn);
558 physical_block_number_t pbn);
561 physical_block_number_t pbn);
vio.c    45 physical_block_number_t pbn = bio->bi_iter.bi_sector / VDO_SECTORS_PER_BLOCK; local
47 return ((pbn == VDO_GEOMETRY_BLOCK_LOCATION) ? pbn : pbn + vdo->geometry.bio_offset);
174 blk_opf_t bi_opf, physical_block_number_t pbn)
179 pbn -= vdo->geometry.bio_offset;
180 vio->bio_zone = ((pbn / config->thread_counts.bio_rotation_interval) %
186 bio->bi_iter.bi_sector = pbn * VDO_SECTORS_PER_BLOCK;
195 blk_opf_t bi_opf, physical_block_number_t pbn)
201 vdo_set_bio_properties(bio, vio, callback, bi_opf, pbn);
173 vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback, blk_opf_t bi_opf, physical_block_number_t pbn) argument
194 vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback, blk_opf_t bi_opf, physical_block_number_t pbn) argument
280 physical_block_number_t pbn = pbn_from_vio_bio(vio->bio); local
[all...]
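
The vio.c hits are the two directions of one mapping: pbn_from_vio_bio() divides the bio's starting sector by the sectors-per-block constant and adds the partition's bio_offset (except for the geometry block), while vdo_set_bio_properties() subtracts the offset again before programming bi_sector. A small sketch of that round trip, assuming the usual 4 KiB VDO block (8 sectors of 512 bytes) and taking pbn 0 as the geometry block location:

#include <stdint.h>
#include <assert.h>

#define SECTORS_PER_BLOCK 8u            /* 4096 / 512, assumed */
#define GEOMETRY_BLOCK_LOCATION 0u      /* geometry block assumed to live at pbn 0 */

struct geometry { uint64_t bio_offset; };

/* bio sector -> pbn, as in pbn_from_vio_bio(): the geometry block is never offset. */
static uint64_t pbn_from_sector(const struct geometry *g, uint64_t bi_sector)
{
    uint64_t pbn = bi_sector / SECTORS_PER_BLOCK;
    return (pbn == GEOMETRY_BLOCK_LOCATION) ? pbn : pbn + g->bio_offset;
}

/* pbn -> bio sector, as in vdo_set_bio_properties(): undo the offset first. */
static uint64_t sector_from_pbn(const struct geometry *g, uint64_t pbn)
{
    return (pbn - g->bio_offset) * SECTORS_PER_BLOCK;
}

int main(void)
{
    struct geometry g = { .bio_offset = 1 };
    uint64_t pbn = 42;

    /* Converting back and forth should be lossless for data blocks. */
    assert(pbn_from_sector(&g, sector_from_pbn(&g, pbn)) == pbn);
    return 0;
}
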
block-map.h    100 * otherwise the page buffer is bound to particular page whose absolute pbn is in the pbn field. If
138 /* the pbn of the page */
139 physical_block_number_t pbn; member in struct:page_info
173 physical_block_number_t pbn; member in struct:vdo_page_completion
285 * @pbn: A PBN of a tree node.
290 typedef int (*vdo_entry_callback_fn)(physical_block_number_t pbn,
302 struct block_map_zone *zone, physical_block_number_t pbn,
320 physical_block_number_t pbn,
364 physical_block_number_t pbn,
[all...]
block-map.c    190 info->pbn = NO_PAGE;
375 /** set_info_pbn() - Set the pbn for an info, updating the map as needed. */
376 static int __must_check set_info_pbn(struct page_info *info, physical_block_number_t pbn) argument
381 int result = VDO_ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE),
386 if (info->pbn != NO_PAGE)
387 vdo_int_map_remove(cache->page_map, info->pbn);
389 info->pbn = pbn;
391 if (pbn !
442 find_page(struct vdo_page_cache *cache, physical_block_number_t pbn) argument
706 physical_block_number_t pbn = vdo_get_block_map_page_pbn(page); local
766 launch_page_load(struct page_info *info, physical_block_number_t pbn) argument
877 physical_block_number_t *pbn = context; local
890 physical_block_number_t pbn; local
1210 vdo_get_page(struct vdo_page_completion *page_completion, struct block_map_zone *zone, physical_block_number_t pbn, bool writable, void *parent, vdo_action_fn callback, vdo_action_fn error_handler, bool requeue) argument
1386 vdo_copy_valid_page(char *buffer, nonce_t nonce, physical_block_number_t pbn, struct block_map_page *page) argument
1824 physical_block_number_t pbn; local
1877 physical_block_number_t pbn = lock->tree_slots[lock->height - 1].block_map_slot.pbn; local
1953 physical_block_number_t pbn = *((physical_block_number_t *) context); local
2069 physical_block_number_t pbn; local
2168 physical_block_number_t pbn; local
2247 physical_block_number_t pbn; local
3191 vdo_update_block_map_page(struct block_map_page *page, struct data_vio *data_vio, physical_block_number_t pbn, enum block_mapping_state mapping_state, sequence_number_t *recovery_lock) argument
[all...]
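
The set_info_pbn() hits show the page cache keeping a pbn-keyed index (page_map) consistent whenever a page_info is rebound: the old key is removed, info->pbn is updated, and the new key is inserted, with an assertion that an info never moves directly between two live pages. A sketch of that shape using a toy linear-probe table in place of vdo's int_map (the table must be zero-initialized before use):

#include <stdint.h>
#include <stddef.h>

#define NO_PAGE   UINT64_MAX    /* sentinel: this info is not bound to any page */
#define MAP_SLOTS 64

/* Toy stand-in for vdo's int_map: pbn -> page_info pointer. */
struct page_map { uint64_t keys[MAP_SLOTS]; void *vals[MAP_SLOTS]; };

struct page_info { uint64_t pbn; /* the pbn of the cached page, or NO_PAGE */ };

static void map_remove(struct page_map *m, uint64_t key)
{
    for (int i = 0; i < MAP_SLOTS; i++)
        if (m->vals[i] != NULL && m->keys[i] == key) { m->vals[i] = NULL; return; }
}

static int map_put(struct page_map *m, uint64_t key, void *val)
{
    for (int i = 0; i < MAP_SLOTS; i++)
        if (m->vals[i] == NULL) { m->keys[i] = key; m->vals[i] = val; return 0; }
    return -1;                           /* table full */
}

/* Rebind an info to a new pbn while keeping the index consistent, mirroring
 * the shape of set_info_pbn(): either the old or the new binding must be NO_PAGE. */
static int set_info_pbn_sketch(struct page_map *map, struct page_info *info, uint64_t pbn)
{
    if (pbn != NO_PAGE && info->pbn != NO_PAGE)
        return -1;                       /* corresponds to the VDO_ASSERT in the hit */

    if (info->pbn != NO_PAGE)
        map_remove(map, info->pbn);      /* drop the old index entry */

    info->pbn = pbn;

    if (pbn != NO_PAGE)
        return map_put(map, pbn, info);  /* index the new binding */
    return 0;
}
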
physical-zone.c    206 * return_pbn_lock_to_pool() - Return a pbn lock to its pool.
411 * @pbn: The physical block number whose lock is desired.
416 physical_block_number_t pbn)
418 return ((zone == NULL) ? NULL : vdo_int_map_get(zone->pbn_operations, pbn));
425 * @pbn: The physical block number to lock.
437 physical_block_number_t pbn,
454 result = vdo_int_map_put(zone->pbn_operations, pbn, new_lock, false,
465 (unsigned long long) pbn);
491 result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn);
495 result = vdo_attempt_physical_zone_pbn_lock(allocation->zone, allocation->pbn,
415 vdo_get_physical_zone_pbn_lock(struct physical_zone *zone, physical_block_number_t pbn) argument
436 vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone, physical_block_number_t pbn, enum pbn_lock_type type, struct pbn_lock **lock_ptr) argument
[all...]
data-vio.h    85 physical_block_number_t pbn; member in struct:zoned_pbn
149 physical_block_number_t pbn; member in struct:allocation
320 return (data_vio->allocation.pbn != VDO_ZERO_BLOCK);
435 (unsigned long long) data_vio->allocation.pbn, thread_id,
465 (unsigned long long) data_vio->duplicate.pbn, thread_id,
495 (unsigned long long) data_vio->mapped.pbn, thread_id, expected);
512 (unsigned long long) data_vio->new_mapped.pbn, thread_id,
encodings.h    155 __le64 pbn; member in struct:block_map_page_header
840 vdo_pack_block_map_entry(physical_block_number_t pbn, enum block_mapping_state mapping_state) argument
844 .pbn_high_nibble = ((pbn >> 32) & 0x0F),
845 .pbn_low_word = __cpu_to_le32(pbn & UINT_MAX),
855 .pbn = ((high4 << 32) | low32),
867 if (location->pbn == VDO_ZERO_BLOCK)
876 return __le64_to_cpu(page->header.pbn);
880 physical_block_number_t pbn,
885 physical_block_number_t pbn);
911 .pbn_high_nibble = (entry->slot.pbn >> 3
[all...]
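
The encodings.h hits pack a 36-bit pbn into the on-disk block map entry as a 4-bit high nibble plus a little-endian 32-bit low word, and unpack it by recombining the two. A standalone sketch of that split and its inverse, with the endianness conversion (__cpu_to_le32/__le64_to_cpu in the kernel) omitted:

#include <stdint.h>
#include <assert.h>

struct packed_entry {
    uint8_t  pbn_high_nibble;   /* bits 32..35 of the pbn */
    uint32_t pbn_low_word;      /* bits 0..31, little-endian on disk */
};

static struct packed_entry pack_pbn(uint64_t pbn)
{
    struct packed_entry e = {
        .pbn_high_nibble = (uint8_t)((pbn >> 32) & 0x0F),
        .pbn_low_word    = (uint32_t)(pbn & 0xFFFFFFFFu),
    };
    return e;
}

static uint64_t unpack_pbn(struct packed_entry e)
{
    return ((uint64_t)e.pbn_high_nibble << 32) | e.pbn_low_word;
}

int main(void)
{
    uint64_t pbn = 0x9ABCDEF01ull;      /* any value that fits in 36 bits */
    assert(unpack_pbn(pack_pbn(pbn)) == pbn);
    return 0;
}
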
types.h    141 physical_block_number_t pbn; member in struct:block_map_slot
166 physical_block_number_t pbn; member in struct:data_location
vio.h    122 blk_opf_t bi_opf, physical_block_number_t pbn);
125 blk_opf_t bi_opf, physical_block_number_t pbn);
slab-depot.c    318 physical_block_number_t pbn; local
343 pbn = (depot->summary_origin +
346 vdo_submit_metadata_vio(&block->vio, pbn, write_slab_summary_endio,
896 * @pbn: The pbn being adjusted.
903 static void add_entry(struct slab_journal *journal, physical_block_number_t pbn, argument
934 pbn - journal->slab->start, operation, increment);
948 * @pbn: The PBN for the entry.
957 bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t pbn, argument
999 add_entry(journal, pbn, operatio
1188 physical_block_number_t pbn; local
1295 slab_block_number_from_pbn(struct vdo_slab *slab, physical_block_number_t pbn, slab_block_number *slab_block_number_ptr) argument
1318 get_reference_counter(struct vdo_slab *slab, physical_block_number_t pbn, vdo_refcount_t **counter_ptr) argument
1941 vdo_adjust_reference_count_for_rebuild(struct slab_depot *depot, physical_block_number_t pbn, enum journal_operation operation) argument
3106 vdo_acquire_provisional_reference(struct vdo_slab *slab, physical_block_number_t pbn, struct pbn_lock *lock) argument
3262 vdo_release_block_reference(struct block_allocator *allocator, physical_block_number_t pbn) argument
3417 physical_block_number_t pbn; local
4311 get_slab_number(const struct slab_depot *depot, physical_block_number_t pbn, slab_count_t *slab_number_ptr) argument
4338 vdo_get_slab(const struct slab_depot *depot, physical_block_number_t pbn) argument
4365 vdo_get_increment_limit(struct slab_depot *depot, physical_block_number_t pbn) argument
4391 vdo_is_physical_data_block(const struct slab_depot *depot, physical_block_number_t pbn) argument
[all...]
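
Several slab-depot.c hits (get_slab_number, slab_block_number_from_pbn, get_reference_counter) resolve a pbn in two steps: find the slab that covers it, then index that slab's refcount array by the pbn's offset from the slab's start, which is the counters[pbn] layout mentioned in slab-depot.h. A simplified sketch of the lookup, assuming fixed-size slabs laid out contiguously from a first_block origin (the real depot validates data-block ranges and supports resize):

#include <stdint.h>
#include <stddef.h>

#define SLAB_SIZE 8192u    /* blocks per slab, assumed for the sketch */

struct slab {
    uint64_t start;                 /* first pbn covered by this slab */
    uint8_t  counters[SLAB_SIZE];   /* reference count per block: counters[pbn - start] */
};

struct depot {
    uint64_t first_block;           /* pbn where slab 0 begins */
    size_t   slab_count;
    struct slab *slabs;             /* slabs[n].start is expected to be first_block + n * SLAB_SIZE */
};

/* pbn -> owning slab, as in get_slab_number()/vdo_get_slab(). */
static struct slab *get_slab(struct depot *d, uint64_t pbn)
{
    if (pbn < d->first_block)
        return NULL;
    size_t n = (pbn - d->first_block) / SLAB_SIZE;
    return (n < d->slab_count) ? &d->slabs[n] : NULL;
}

/* pbn -> pointer to its reference counter, as in get_reference_counter(). */
static uint8_t *get_refcount(struct depot *d, uint64_t pbn)
{
    struct slab *s = get_slab(d, pbn);
    return s ? &s->counters[pbn - s->start] : NULL;
}
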
data-vio.c    1359 (unsigned long long) data_vio->new_mapped.pbn,
1360 (unsigned long long) data_vio->mapped.pbn,
1361 (unsigned long long) data_vio->allocation.pbn,
1407 VDO_ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK),
1419 * @reset: If true, the allocation will be reset (i.e. any allocated pbn will be forgotten).
1426 physical_block_number_t locked_pbn = allocation->pbn;
1431 allocation->pbn = VDO_ZERO_BLOCK;
1568 if (data_vio->mapped.pbn == VDO_ZERO_BLOCK) {
1577 read_endio, REQ_OP_READ, data_vio->mapped.pbn);
1583 data_vio->mapped.pbn);
[all...]
dedupe.c    517 data_vio->is_duplicate = (source.pbn != VDO_ZERO_BLOCK);
699 vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn,
1208 agent->duplicate.pbn);
1284 struct vdo_slab *slab = vdo_get_slab(depot, agent->duplicate.pbn);
1285 int result = vdo_acquire_provisional_reference(slab, agent->duplicate.pbn, lock);
1294 agent->duplicate.pbn, lock);
1327 increment_limit = vdo_get_increment_limit(depot, agent->duplicate.pbn);
1338 result = vdo_attempt_physical_zone_pbn_lock(zone, agent->duplicate.pbn,
1622 advice->pbn = get_unaligned_le64(&encoding->data[offset]);
1627 if ((advice->state == VDO_MAPPING_STATE_UNMAPPED) || (advice->pbn
[all...]
packer.c    261 .pbn = allocation->pbn,
284 * Process all the non-agent waiters first to ensure that the pbn lock can not be released
307 (unsigned long long) allocation->pbn);
469 REQ_OP_WRITE, agent->allocation.pbn);
recovery-journal.c    1197 has_decrement = (data_vio->decrement_updater.zpbn.pbn != VDO_ZERO_BLOCK);
1198 if ((data_vio->increment_updater.zpbn.pbn != VDO_ZERO_BLOCK) || !has_decrement)
1345 .pbn = data_vio->increment_updater.zpbn.pbn,
1349 .pbn = data_vio->decrement_updater.zpbn.pbn,
/linux-master/drivers/bluetooth/
hci_ag6xx.c    228 "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.pbn",
254 struct pbn_entry *pbn = (void *)fw_ptr; local
257 if (pbn->addr == 0xffffffff) {
263 addr = le32_to_cpu(pbn->addr);
264 plen = le32_to_cpu(pbn->plen);
266 if (fw->data + fw->size <= pbn->data + plen) {
274 err = intel_mem_write(hdev, addr, plen, pbn->data);
280 fw_ptr = pbn->data + plen;
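
The hci_ag6xx.c hits walk an Intel .pbn patch file as a sequence of (addr, plen, data[plen]) records terminated by an address of 0xffffffff, bounds-checking each payload against the firmware blob before writing it to the controller. A userspace sketch of that walk over a byte buffer, with write_region() standing in for intel_mem_write() and a little-endian host assumed (the driver converts with le32_to_cpu):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct pbn_entry {
    uint32_t addr;          /* little-endian in the file */
    uint32_t plen;
    /* followed by plen bytes of payload */
};

/* Stand-in for intel_mem_write(): the real driver sends the region to the controller. */
static int write_region(uint32_t addr, const uint8_t *data, uint32_t len)
{
    (void)addr; (void)data; (void)len;
    return 0;
}

static int parse_pbn(const uint8_t *fw, size_t size)
{
    const uint8_t *ptr = fw;

    while ((size_t)(ptr - fw) + sizeof(struct pbn_entry) <= size) {
        struct pbn_entry e;

        memcpy(&e, ptr, sizeof(e));     /* copy out the header, avoiding unaligned access */

        if (e.addr == 0xffffffff)       /* terminator record */
            return 0;

        /* Reject a record whose payload would run past the end of the blob. */
        if ((size_t)(ptr - fw) + sizeof(e) + e.plen > size)
            return -1;

        if (write_region(e.addr, ptr + sizeof(e), e.plen))
            return -1;

        ptr += sizeof(e) + e.plen;      /* advance to the next record */
    }

    return -1;                          /* ran out of data before the terminator */
}
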
/linux-master/drivers/gpu/drm/amd/display/amdgpu_dm/
amdgpu_dm_mst_types.c    855 params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn;
866 DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n",
869 vars[i + k].pbn);
873 static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) argument
884 kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
918 kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
945 link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div));
951 vars[next_index].pbn += fair_pbn_alloc;
955 vars[next_index].pbn);
961 vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
1572 kbps_from_pbn(unsigned int pbn) argument
1604 int bpp, pbn, branch_max_throughput_mps = 0; local
[all...]
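
The bpp_x16_from_pbn()/kbps_from_pbn() hits rely on the DisplayPort MST unit conversion shown in the kbps = pbn * 994 * 8 * 54 / 64 hit: one PBN corresponds to 64/54 MBps, the factor of 8 converts bytes to bits, and the 994/1000 term approximately undoes the 1.006 overhead margin applied when bandwidth is converted to PBN. A tiny sketch of just that arithmetic, not the full DSC fairness path:

#include <stdint.h>
#include <stdio.h>

/* kbps from PBN, matching the constants visible in the hits:
 * 1 PBN = 64/54 MBps; *8 converts bytes to bits; 994/1000 removes the margin. */
static uint64_t kbps_from_pbn(uint64_t pbn)
{
    return pbn * 994 * 8 * 54 / 64;
}

int main(void)
{
    /* e.g. 1000 PBN -> roughly 6.7 Gbps of payload bandwidth */
    printf("%llu kbps\n", (unsigned long long)kbps_from_pbn(1000));
    return 0;
}
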
amdgpu_dm_mst_types.h    76 int pbn; member in struct:dsc_mst_fairness_vars
/linux-master/drivers/gpu/drm/nouveau/include/nvif/
outp.h    115 u8 start_slot, u8 num_slots, u16 pbn, u16 aligned_pbn);
/linux-master/include/drm/display/
drm_dp_mst_helper.h    341 u16 pbn; member in struct:drm_dp_allocate_payload
567 /** @pbn: The payload bandwidth for this payload */
568 int pbn; member in struct:drm_dp_mst_atomic_payload
902 struct drm_dp_mst_port *port, int pbn);
905 int pbn, bool enable);
/linux-master/drivers/gpu/drm/nouveau/nvkm/engine/disp/
tu102.c    34 tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned) argument
39 nvkm_mask(device, 0x61657c + hoff, 0xffffffff, (aligned << 16) | pbn);
/linux-master/drivers/gpu/drm/nouveau/nvif/
outp.c    30 u8 start_slot, u8 num_slots, u16 pbn, u16 aligned_pbn)
39 args.pbn = pbn;
44 "[DP_MST_VCPI head:%d start_slot:%02x num_slots:%02x pbn:%04x aligned_pbn:%04x]",
45 args.head, args.start_slot, args.num_slots, args.pbn, args.aligned_pbn);
29 nvif_outp_dp_mst_vcpi(struct nvif_outp *outp, int head, u8 start_slot, u8 num_slots, u16 pbn, u16 aligned_pbn) argument

Completed in 193 milliseconds
